From 22bcea576694b4fa47009b73d458760ce349f900 Mon Sep 17 00:00:00 2001
From: maqi
Date: Mon, 24 Feb 2020 16:43:47 +0800
Subject: [PATCH 01/47] kudu code scan

---
 .../java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java | 3 ---
 .../java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java | 3 ---
 .../com/dtstack/flink/sql/side/kudu/table/KuduSideParser.java | 1 +
 .../java/com/dtstack/flink/sql/side/kudu/utils/KuduUtil.java | 1 +
 .../com/dtstack/flink/sql/sink/kudu/table/KuduSinkParser.java | 1 +
 5 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java b/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java
index 047665928..cf2c70f1f 100644
--- a/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java
+++ b/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java
@@ -227,9 +227,6 @@ private KuduScanner getConn(KuduSideTableInfo tableInfo) {
         if (null != workerCount) {
             kuduClientBuilder.workerCount(workerCount);
         }
-        if (null != defaultSocketReadTimeoutMs) {
-            kuduClientBuilder.defaultSocketReadTimeoutMs(defaultSocketReadTimeoutMs);
-        }
 
         if (null != defaultOperationTimeoutMs) {
             kuduClientBuilder.defaultOperationTimeoutMs(defaultOperationTimeoutMs);
diff --git a/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java b/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java
index 4a7457418..12689a07b 100644
--- a/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java
+++ b/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java
@@ -82,9 +82,6 @@ private void connKuDu() throws KuduException {
         if (null != workerCount) {
             asyncKuduClientBuilder.workerCount(workerCount);
         }
-        if (null != defaultSocketReadTimeoutMs) {
-            asyncKuduClientBuilder.defaultSocketReadTimeoutMs(defaultSocketReadTimeoutMs);
-        }
 
         if (null != defaultOperationTimeoutMs) {
             asyncKuduClientBuilder.defaultOperationTimeoutMs(defaultOperationTimeoutMs);
diff --git a/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideParser.java b/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideParser.java
index c03c470a7..83864fa22 100644
--- a/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideParser.java
+++ b/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideParser.java
@@ -107,6 +107,7 @@ public Class dbTypeConvertToJavaType(String fieldType) {
                 return BigDecimal.class;
             case "binary":
                 return byte[].class;
+            default:
         }
 
         throw new RuntimeException("不支持 " + fieldType + " 类型");
diff --git a/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/utils/KuduUtil.java b/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/utils/KuduUtil.java
index c5cd8cdb2..91c94a2b2 100644
--- a/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/utils/KuduUtil.java
+++ b/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/utils/KuduUtil.java
@@ -190,6 +190,7 @@ public static KuduPredicate buildKuduPredicate(Schema schema, PredicateInfo info
                 return KuduPredicate.newIsNotNullPredicate(column);
             case "IS NULL":
                 return KuduPredicate.newIsNullPredicate(column);
+            default:
         }
 
         return null;
diff --git a/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduSinkParser.java b/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduSinkParser.java
index ea61bd325..e38a85390 100644
--- a/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduSinkParser.java
+++ b/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduSinkParser.java
@@ -86,6 +86,7 @@ public Class dbTypeConvertToJavaType(String fieldType) {
                 return BigDecimal.class;
             case "binary":
                 return byte[].class;
+            default:
         }
 
         throw new RuntimeException("不支持 " + fieldType + " 类型");

From 86d05ee69c838e1d70e09719281006639c32e55e Mon Sep 17 00:00:00 2001
From: maqi
Date: Tue, 25 Feb 2020 15:01:30 +0800
Subject: [PATCH 02/47] mongo scan

---
 docs/mongoSide.md | 5 +-
 docs/mongoSink.md | 7 +--
 .../flink/sql/side/mongo/MongoAllReqRow.java | 48 ++++----------
 .../sql/side/mongo/MongoAsyncReqRow.java | 62 ++++++-------------
 .../flink/sql/side/mongo/utils/MongoUtil.java | 1 +
 .../sql/sink/mongo/MongoOutputFormat.java | 34 +---------
 6 files changed, 38 insertions(+), 119 deletions(-)

diff --git a/docs/mongoSide.md b/docs/mongoSide.md
index db557f441..73ce9644f 100644
--- a/docs/mongoSide.md
+++ b/docs/mongoSide.md
@@ -40,8 +40,6 @@
 |----|---|---|----|
 | type |表明 输出表类型 mongo|是||
 | address | 连接mongo数据库 jdbcUrl |是||
- | userName | mongo连接用户名|否||
- | password | mongo连接密码|否||
 | tableName | mongo表名称|是||
 | database | mongo表名称|是||
 | cache | 维表缓存策略(NONE/LRU)|否|NONE|
@@ -64,7 +62,8 @@ create table sideTable(
     PERIOD FOR SYSTEM_TIME
 )WITH(
     type ='mongo',
-    address ='172.21.32.1:27017,172.21.32.1:27017',
+    //mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]]/[?options]]
+    address ='mongodb://172.21.32.1:27017,172.21.32.1:27017',
     database ='test',
     tableName ='sidetest',
     cache ='LRU',
diff --git a/docs/mongoSink.md b/docs/mongoSink.md
index b0f916aab..b5bb6df0d 100644
--- a/docs/mongoSink.md
+++ b/docs/mongoSink.md
@@ -33,8 +33,6 @@ CREATE TABLE tableName(
 |----|----|----|----|
 |type |表明 输出表类型 mongo|是||
 |address | 连接mongo数据库 jdbcUrl |是||
-|userName | mongo连接用户名|否||
-|password | mongo连接密码|否||
 |tableName | mongo表名称|是||
 |database | mongo表名称|是||
 |parallelism | 并行度设置|否|1|
@@ -46,9 +44,8 @@ CREATE TABLE MyResult(
     pv VARCHAR
 )WITH(
     type ='mongo',
-    address ='172.21.32.1:27017,172.21.32.1:27017',
-    userName ='dtstack',
-    password ='abc123',
+    //mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]]/[?options]]
+    address ='mongodb://172.21.32.1:27017,172.21.32.1:27017',
     database ='test',
     tableName ='pv',
     parallelism ='1'
diff --git a/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java b/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java
index b416e4820..3229101c6 100644
--- a/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java
+++ b/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java
@@ -26,16 +26,13 @@
 import com.dtstack.flink.sql.side.mongo.utils.MongoUtil;
 import com.mongodb.BasicDBObject;
 import com.mongodb.MongoClient;
-import com.mongodb.MongoClientOptions;
-import com.mongodb.MongoCredential;
-import com.mongodb.ServerAddress;
+import com.mongodb.MongoClientURI;
 import com.mongodb.client.FindIterable;
 import com.mongodb.client.MongoCollection;
 import com.mongodb.client.MongoCursor;
import com.mongodb.client.MongoDatabase; import org.apache.calcite.sql.JoinType; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.java.typeutils.RowTypeInfo; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -49,7 +46,6 @@ import java.sql.SQLException; import java.sql.Timestamp; -import java.util.ArrayList; import java.util.Calendar; import java.util.List; import java.util.Map; @@ -180,34 +176,13 @@ private String buildKey(Map val, List equalFieldList) { return sb.toString(); } - private MongoCollection getConn(String address, String userName, String password, String database, String tableName) { + private MongoCollection getConn(String address, String database, String tableName) { MongoCollection dbCollection; - try { - MongoCredential credential; - String[] servers = address.split(","); - String host; - Integer port; - String[] hostAndPort; - List lists = new ArrayList<>(); - for (String server : servers) { - hostAndPort = server.split(":"); - host = hostAndPort[0]; - port = Integer.parseInt(hostAndPort[1]); - lists.add(new ServerAddress(host, port)); - } - if (!StringUtils.isEmpty(userName) || !StringUtils.isEmpty(password)) { - credential = MongoCredential.createCredential(userName, database, password.toCharArray()); - // To connect to mongodb server - mongoClient = new MongoClient(lists, credential, new MongoClientOptions.Builder().build()); - } else { - mongoClient = new MongoClient(lists); - } - db = mongoClient.getDatabase(database); - dbCollection = db.getCollection(tableName, Document.class); - return dbCollection; - } catch (Exception e) { - throw new RuntimeException("[connMongoDB]:" + e.getMessage()); - } + mongoClient = new MongoClient(new MongoClientURI(address)); + db = mongoClient.getDatabase(database); + dbCollection = db.getCollection(tableName, Document.class); + return dbCollection; + } private void loadData(Map>> tmpCache) throws SQLException { @@ -217,8 +192,7 @@ private void loadData(Map>> tmpCache) throws SQ try { for (int i = 0; i < CONN_RETRY_NUM; i++) { try { - dbCollection = getConn(tableInfo.getAddress(), tableInfo.getUserName(), tableInfo.getPassword(), - tableInfo.getDatabase(), tableInfo.getTableName()); + dbCollection = getConn(tableInfo.getAddress(), tableInfo.getDatabase(), tableInfo.getTableName()); break; } catch (Exception e) { if (i == CONN_RETRY_NUM - 1) { @@ -237,9 +211,9 @@ private void loadData(Map>> tmpCache) throws SQ //load data from table String[] sideFieldNames = sideInfo.getSideSelectFields().split(","); - BasicDBObject basicDBObject = new BasicDBObject(); + BasicDBObject basicDbObject = new BasicDBObject(); for (String selectField : sideFieldNames) { - basicDBObject.append(selectField, 1); + basicDbObject.append(selectField, 1); } BasicDBObject filterObject = new BasicDBObject(); try { @@ -256,7 +230,7 @@ private void loadData(Map>> tmpCache) throws SQ } - FindIterable findIterable = dbCollection.find(filterObject).projection(basicDBObject).limit(FETCH_SIZE); + FindIterable findIterable = dbCollection.find(filterObject).projection(basicDbObject).limit(FETCH_SIZE); MongoCursor mongoCursor = findIterable.iterator(); while (mongoCursor.hasNext()) { Document doc = mongoCursor.next(); diff --git a/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncReqRow.java b/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncReqRow.java index 8ac7d83ee..5ef06c80e 100644 
--- a/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncReqRow.java +++ b/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncReqRow.java @@ -29,11 +29,12 @@ import com.dtstack.flink.sql.side.mongo.utils.MongoUtil; import com.mongodb.BasicDBObject; import com.mongodb.Block; +import com.mongodb.ConnectionString; import com.mongodb.MongoCredential; import com.mongodb.ServerAddress; import com.mongodb.async.SingleResultCallback; import com.mongodb.async.client.MongoClient; -import com.mongodb.async.client.MongoClientSettings; +import com.mongodb.MongoClientSettings; import com.mongodb.async.client.MongoClients; import com.mongodb.async.client.MongoCollection; import com.mongodb.async.client.MongoDatabase; @@ -70,13 +71,11 @@ public class MongoAsyncReqRow extends AsyncReqRow { private static final Logger LOG = LoggerFactory.getLogger(MongoAsyncReqRow.class); - private final static int DEFAULT_MAX_DB_CONN_POOL_SIZE = 20; - private transient MongoClient mongoClient; private MongoDatabase db; - private MongoSideTableInfo MongoSideTableInfo; + private MongoSideTableInfo mongoSideTableInfo; public MongoAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { super(new MongoAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); @@ -85,48 +84,25 @@ public MongoAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List lists = new ArrayList<>(); - for (String server : servers) { - hostAndPort = server.split(":"); - host = hostAndPort[0]; - port = Integer.parseInt(hostAndPort[1]); - lists.add(new ServerAddress(host, port)); - } - ClusterSettings clusterSettings = ClusterSettings.builder().hosts(lists).build(); - ConnectionPoolSettings connectionPoolSettings = ConnectionPoolSettings.builder() - .maxSize(DEFAULT_MAX_DB_CONN_POOL_SIZE) + public void connMongoDb() throws Exception { + String address = mongoSideTableInfo.getAddress(); + ConnectionString connectionString = new ConnectionString(address); + + MongoClientSettings settings = MongoClientSettings.builder() + .applyConnectionString(connectionString) .build(); - if (!StringUtils.isEmpty(MongoSideTableInfo.getUserName()) || !StringUtils.isEmpty(MongoSideTableInfo.getPassword())) { - mongoCredential = MongoCredential.createCredential(MongoSideTableInfo.getUserName(), MongoSideTableInfo.getDatabase(), - MongoSideTableInfo.getPassword().toCharArray()); - MongoClientSettings settings = MongoClientSettings.builder().credential(mongoCredential) - .clusterSettings(clusterSettings) - .connectionPoolSettings(connectionPoolSettings) - .build(); - mongoClient = MongoClients.create(settings); - } else { - MongoClientSettings settings = MongoClientSettings.builder().clusterSettings(clusterSettings) - .connectionPoolSettings(connectionPoolSettings) - .build(); - mongoClient = MongoClients.create(settings); - } - db = mongoClient.getDatabase(MongoSideTableInfo.getDatabase()); + mongoClient = MongoClients.create(settings); + db = mongoClient.getDatabase(mongoSideTableInfo.getDatabase()); } @Override public void asyncInvoke(CRow input, ResultFuture resultFuture) throws Exception { CRow inputCopy = new CRow(input.row(), input.change()); - BasicDBObject basicDBObject = new BasicDBObject(); + BasicDBObject basicDbObject = new BasicDBObject(); for (int i = 0; i < sideInfo.getEqualFieldList().size(); i++) { Integer conValIndex = sideInfo.getEqualValIndex().get(i); Object equalObj = inputCopy.row().getField(conValIndex); @@ 
-134,14 +110,14 @@ public void asyncInvoke(CRow input, ResultFuture resultFuture) throws Exce dealMissKey(inputCopy, resultFuture); return; } - basicDBObject.put(sideInfo.getEqualFieldList().get(i), equalObj); + basicDbObject.put(sideInfo.getEqualFieldList().get(i), equalObj); } try { // 填充谓词 sideInfo.getSideTableInfo().getPredicateInfoes().stream().map(info -> { BasicDBObject filterCondition = MongoUtil.buildFilterObject(info); if (null != filterCondition) { - basicDBObject.append(info.getFieldName(), filterCondition); + basicDbObject.append(info.getFieldName(), filterCondition); } return info; }).count(); @@ -149,7 +125,7 @@ public void asyncInvoke(CRow input, ResultFuture resultFuture) throws Exce LOG.info("add predicate infoes error ", e); } - String key = buildCacheKey(basicDBObject.values()); + String key = buildCacheKey(basicDbObject.values()); if (openCache()) { CacheObj val = getFromCache(key); if (val != null) { @@ -171,7 +147,7 @@ public void asyncInvoke(CRow input, ResultFuture resultFuture) throws Exce } } AtomicInteger atomicInteger = new AtomicInteger(0); - MongoCollection dbCollection = db.getCollection(MongoSideTableInfo.getTableName(), Document.class); + MongoCollection dbCollection = db.getCollection(mongoSideTableInfo.getTableName(), Document.class); List cacheContent = Lists.newArrayList(); Block printDocumentBlock = new Block() { @Override @@ -197,7 +173,7 @@ public void onResult(final Void result, final Throwable t) { } } }; - dbCollection.find(basicDBObject).forEach(printDocumentBlock, callbackWhenFinished); + dbCollection.find(basicDbObject).forEach(printDocumentBlock, callbackWhenFinished); } @Override diff --git a/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/utils/MongoUtil.java b/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/utils/MongoUtil.java index 6e0177759..4a9b77b9f 100644 --- a/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/utils/MongoUtil.java +++ b/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/utils/MongoUtil.java @@ -55,6 +55,7 @@ public static BasicDBObject buildFilterObject(PredicateInfo info) { return new BasicDBObject("$exists", true); case "IS NULL": return new BasicDBObject("$exists", false); + default: } return null; } diff --git a/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoOutputFormat.java b/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoOutputFormat.java index 82bc9c15f..daaded15e 100644 --- a/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoOutputFormat.java +++ b/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoOutputFormat.java @@ -21,13 +21,10 @@ import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; import com.mongodb.MongoClient; -import com.mongodb.MongoClientOptions; -import com.mongodb.MongoCredential; -import com.mongodb.ServerAddress; +import com.mongodb.MongoClientURI; import com.mongodb.client.MongoCollection; import com.mongodb.client.MongoDatabase; import com.mongodb.client.result.UpdateResult; -import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.configuration.Configuration; @@ -37,9 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.List; /** * Reason: 
@@ -124,30 +118,8 @@ public void close() { } private void establishConnection() { - try { - MongoCredential credential; - String[] servers = address.split(","); - String host; - Integer port; - String[] hostAndPort; - List lists = new ArrayList<>(); - for (String server : servers) { - hostAndPort = server.split(":"); - host = hostAndPort[0]; - port = Integer.parseInt(hostAndPort[1]); - lists.add(new ServerAddress(host, port)); - } - if (!StringUtils.isEmpty(userName) || !StringUtils.isEmpty(password)) { - credential = MongoCredential.createCredential(userName, database, password.toCharArray()); - // To connect to mongodb server - mongoClient = new MongoClient(lists, credential, new MongoClientOptions.Builder().build()); - } else { - mongoClient = new MongoClient(lists); - } - db = mongoClient.getDatabase(database); - } catch (Exception e) { - throw new IllegalArgumentException("[connMongoDB]:" + e.getMessage()); - } + mongoClient = new MongoClient(new MongoClientURI(address)); + db = mongoClient.getDatabase(database); } private MongoOutputFormat() { From 40460cb57da1ac1bc548112f65fade396976a615 Mon Sep 17 00:00:00 2001 From: maqi Date: Tue, 25 Feb 2020 15:56:10 +0800 Subject: [PATCH 03/47] rdb scan --- .../side/clickhouse/ClickhouseAllReqRow.java | 7 +- .../clickhouse/ClickhouseAsyncReqRow.java | 2 +- .../sql/sink/clickhouse/ClickhouseSink.java | 9 +- .../flink/sql/side/db2/Db2AllReqRow.java | 7 +- .../flink/sql/side/db2/Db2AsyncReqRow.java | 2 +- .../com/dtstack/flink/sql/sink/db/DbSink.java | 9 +- .../sql/side/impala/ImpalaAllReqRow.java | 7 +- .../sql/side/impala/ImpalaAsyncReqRow.java | 2 +- .../flink/sql/sink/impala/ImpalaSink.java | 12 +- .../flink/sql/side/mysql/MysqlAllReqRow.java | 8 +- .../sql/side/mysql/MysqlAsyncReqRow.java | 2 +- .../flink/sql/sink/mysql/MysqlSink.java | 6 +- .../sql/side/oracle/OracleAllReqRow.java | 8 +- .../sql/side/oracle/OracleAsyncReqRow.java | 2 +- .../flink/sql/sink/oracle/OracleSink.java | 6 +- .../sql/side/polardb/PolardbAllReqRow.java | 30 +- .../sql/side/polardb/PolardbAllSideInfo.java | 22 ++ .../sql/side/polardb/PolardbAsyncReqRow.java | 24 +- .../side/polardb/PolardbAsyncSideInfo.java | 22 ++ .../side/polardb/table/PolardbSideParser.java | 23 ++ .../flink/sql/sink/polardb/PolardbSink.java | 30 +- .../sink/polardb/table/PolardbSinkParser.java | 22 ++ .../side/postgresql/PostgresqlAllReqRow.java | 8 +- .../postgresql/PostgresqlAsyncReqRow.java | 2 +- .../sql/sink/postgresql/PostgresqlSink.java | 6 +- .../flink/sql/side/rdb/all/RdbAllReqRow.java | 228 ------------ .../sql/side/rdb/async/RdbAsyncReqRow.java | 24 +- .../provider/DTC3P0DataSourceProvider.java | 10 +- .../flink/sql/side/rdb/util/SwitchUtil.java | 1 + .../{RdbSink.java => AbstractRdbSink.java} | 18 +- .../flink/sql/sink/rdb/JDBCOptions.java | 22 +- .../rdb/format/AbstractJDBCOutputFormat.java | 10 +- .../rdb/format/JDBCUpsertOutputFormat.java | 15 +- .../sql/sink/rdb/writer/AppendOnlyWriter.java | 9 +- .../sql/sink/rdb/writer/UpsertWriter.java | 338 ------------------ .../side/sqlserver/SqlserverAllReqRow.java | 16 +- .../side/sqlserver/SqlserverAllSideInfo.java | 6 +- .../side/sqlserver/SqlserverAsyncReqRow.java | 8 +- .../sqlserver/SqlserverAsyncSideInfo.java | 7 +- .../sqlserver/table/SqlserverSideParser.java | 6 +- .../sql/sink/sqlserver/SqlserverSink.java | 6 +- 41 files changed, 296 insertions(+), 706 deletions(-) delete mode 100644 rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllReqRow.java rename 
rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/{RdbSink.java => AbstractRdbSink.java} (90%) delete mode 100644 rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/UpsertWriter.java diff --git a/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllReqRow.java b/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllReqRow.java index 68c0c7984..a2bf8add9 100644 --- a/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllReqRow.java +++ b/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllReqRow.java @@ -21,20 +21,17 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.SideTableInfo; -import com.dtstack.flink.sql.side.rdb.all.RdbAllReqRow; -import com.dtstack.flink.sql.util.DtStringUtil; +import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.JDBCUtils; import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.shaded.guava18.com.google.common.collect.Maps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.sql.Connection; import java.sql.DriverManager; import java.util.List; -import java.util.Map; -public class ClickhouseAllReqRow extends RdbAllReqRow { +public class ClickhouseAllReqRow extends AbstractRdbAllReqRow { private static final Logger LOG = LoggerFactory.getLogger(ClickhouseAllReqRow.class); diff --git a/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java b/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java index 305d65118..3733d7350 100644 --- a/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java +++ b/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java @@ -58,7 +58,7 @@ public void open(Configuration parameters) throws Exception { vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); - setRdbSQLClient(JDBCClient.createNonShared(vertx, clickhouseClientConfig)); + setRdbSqlClient(JDBCClient.createNonShared(vertx, clickhouseClientConfig)); } } diff --git a/clickhouse/clickhouse-sink/src/main/java/com/dtstack/flink/sql/sink/clickhouse/ClickhouseSink.java b/clickhouse/clickhouse-sink/src/main/java/com/dtstack/flink/sql/sink/clickhouse/ClickhouseSink.java index bbb6a6fe4..5f7e41f7b 100644 --- a/clickhouse/clickhouse-sink/src/main/java/com/dtstack/flink/sql/sink/clickhouse/ClickhouseSink.java +++ b/clickhouse/clickhouse-sink/src/main/java/com/dtstack/flink/sql/sink/clickhouse/ClickhouseSink.java @@ -22,14 +22,11 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.rdb.JDBCOptions; -import com.dtstack.flink.sql.sink.rdb.RdbSink; +import com.dtstack.flink.sql.sink.rdb.AbstractRdbSink; import com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; -import java.util.List; -import java.util.Map; - -public class ClickhouseSink extends RdbSink implements IStreamSinkGener { +public class ClickhouseSink extends AbstractRdbSink implements IStreamSinkGener { public 
ClickhouseSink() { super(new ClickhouseDialect()); } @@ -37,7 +34,7 @@ public ClickhouseSink() { @Override public JDBCUpsertOutputFormat getOutputFormat() { JDBCOptions jdbcOptions = JDBCOptions.builder() - .setDBUrl(dbURL) + .setDbUrl(dbUrl) .setDialect(jdbcDialect) .setUsername(userName) .setPassword(password) diff --git a/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllReqRow.java b/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllReqRow.java index a77201b80..e64e55987 100644 --- a/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllReqRow.java +++ b/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllReqRow.java @@ -21,9 +21,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.SideTableInfo; -import com.dtstack.flink.sql.side.rdb.all.RdbAllReqRow; -import com.dtstack.flink.sql.util.DtStringUtil; -import com.google.common.collect.Maps; +import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,7 +29,6 @@ import java.sql.Connection; import java.sql.DriverManager; import java.util.List; -import java.util.Map; /** * Date: 2019/11/20 @@ -40,7 +37,7 @@ * @author xiuzhu */ -public class Db2AllReqRow extends RdbAllReqRow { +public class Db2AllReqRow extends AbstractRdbAllReqRow { private static final Logger LOG = LoggerFactory.getLogger(Db2AllReqRow.class); diff --git a/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java b/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java index 3d85c6fbd..59571aa56 100644 --- a/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java +++ b/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java @@ -74,7 +74,7 @@ public void open(Configuration parameters) throws Exception { vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); - setRdbSQLClient(JDBCClient.createNonShared(vertx, db2lientConfig)); + setRdbSqlClient(JDBCClient.createNonShared(vertx, db2lientConfig)); } } diff --git a/db2/db2-sink/src/main/java/com/dtstack/flink/sql/sink/db/DbSink.java b/db2/db2-sink/src/main/java/com/dtstack/flink/sql/sink/db/DbSink.java index 9942d4438..da1434ace 100644 --- a/db2/db2-sink/src/main/java/com/dtstack/flink/sql/sink/db/DbSink.java +++ b/db2/db2-sink/src/main/java/com/dtstack/flink/sql/sink/db/DbSink.java @@ -1,13 +1,10 @@ package com.dtstack.flink.sql.sink.db; import com.dtstack.flink.sql.sink.rdb.JDBCOptions; -import com.dtstack.flink.sql.sink.rdb.RdbSink; +import com.dtstack.flink.sql.sink.rdb.AbstractRdbSink; import com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; -import java.util.List; -import java.util.Map; - -public class DbSink extends RdbSink { +public class DbSink extends AbstractRdbSink { public DbSink() { super(new DbDialect()); @@ -15,7 +12,7 @@ public DbSink() { @Override public JDBCUpsertOutputFormat getOutputFormat() { JDBCOptions jdbcOptions = JDBCOptions.builder() - .setDBUrl(dbURL) + .setDbUrl(dbUrl) .setDialect(jdbcDialect) .setUsername(userName) .setPassword(password) diff --git a/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllReqRow.java 
b/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllReqRow.java index 9cd8c9194..4118421ee 100644 --- a/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllReqRow.java +++ b/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllReqRow.java @@ -22,10 +22,8 @@ import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.SideTableInfo; import com.dtstack.flink.sql.side.impala.table.ImpalaSideTableInfo; -import com.dtstack.flink.sql.side.rdb.all.RdbAllReqRow; -import com.dtstack.flink.sql.util.DtStringUtil; +import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.JDBCUtils; -import com.google.common.collect.Maps; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; @@ -36,7 +34,6 @@ import java.sql.Connection; import java.sql.DriverManager; import java.util.List; -import java.util.Map; /** * side operator with cache for all(period reload) @@ -46,7 +43,7 @@ * @author xiuzhu */ -public class ImpalaAllReqRow extends RdbAllReqRow { +public class ImpalaAllReqRow extends AbstractRdbAllReqRow { private static final long serialVersionUID = 2098635140857937717L; diff --git a/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java b/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java index 63d53b80d..1b983ca2c 100644 --- a/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java +++ b/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java @@ -66,7 +66,7 @@ public void open(Configuration parameters) throws Exception { vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); - setRdbSQLClient(JDBCClient.createNonShared(vertx, impalaClientConfig)); + setRdbSqlClient(JDBCClient.createNonShared(vertx, impalaClientConfig)); } public JsonObject getClientConfig() { diff --git a/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/ImpalaSink.java b/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/ImpalaSink.java index 41fa00926..91b6532bf 100644 --- a/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/ImpalaSink.java +++ b/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/ImpalaSink.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.impala.table.ImpalaTableInfo; import com.dtstack.flink.sql.sink.rdb.JDBCOptions; -import com.dtstack.flink.sql.sink.rdb.RdbSink; +import com.dtstack.flink.sql.sink.rdb.AbstractRdbSink; import com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; import com.dtstack.flink.sql.table.TargetTableInfo; import org.apache.hadoop.conf.Configuration; @@ -36,7 +36,7 @@ * @author xiuzhu */ -public class ImpalaSink extends RdbSink implements IStreamSinkGener { +public class ImpalaSink extends AbstractRdbSink implements IStreamSinkGener { private ImpalaTableInfo impalaTableInfo; @@ -47,7 +47,7 @@ public ImpalaSink() { @Override public JDBCUpsertOutputFormat getOutputFormat() { JDBCOptions jdbcOptions = JDBCOptions.builder() - .setDBUrl(getImpalaJdbcUrl()) + .setDbUrl(getImpalaJdbcUrl()) .setDialect(jdbcDialect) 
.setUsername(userName) .setPassword(password) @@ -70,8 +70,8 @@ public JDBCUpsertOutputFormat getOutputFormat() { public String getImpalaJdbcUrl() { Integer authMech = impalaTableInfo.getAuthMech(); - String newUrl = dbURL; - StringBuffer urlBuffer = new StringBuffer(dbURL); + String newUrl = dbUrl; + StringBuffer urlBuffer = new StringBuffer(dbUrl); if (authMech == EAuthMech.NoAuthentication.getType()) { return newUrl; } else if (authMech == EAuthMech.Kerberos.getType()) { @@ -121,7 +121,7 @@ public String getImpalaJdbcUrl() { } @Override - public RdbSink genStreamSink(TargetTableInfo targetTableInfo) { + public AbstractRdbSink genStreamSink(TargetTableInfo targetTableInfo) { super.genStreamSink(targetTableInfo); this.impalaTableInfo = (ImpalaTableInfo) targetTableInfo; return this; diff --git a/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllReqRow.java b/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllReqRow.java index b6b7f45cb..8cf6ab597 100644 --- a/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllReqRow.java +++ b/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllReqRow.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.SideTableInfo; -import com.dtstack.flink.sql.side.rdb.all.RdbAllReqRow; +import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.DtStringUtil; import org.apache.flink.api.java.typeutils.RowTypeInfo; import com.google.common.collect.Maps; @@ -41,7 +41,7 @@ * @author xuchao */ -public class MysqlAllReqRow extends RdbAllReqRow { +public class MysqlAllReqRow extends AbstractRdbAllReqRow { private static final long serialVersionUID = 2098635140857937717L; @@ -54,13 +54,13 @@ public MysqlAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List addParams = Maps.newHashMap(); addParams.put("useCursorFetch", "true"); - String targetDbUrl = DtStringUtil.addJdbcParam(dbURL, addParams, true); + String targetDbUrl = DtStringUtil.addJdbcParam(dbUrl, addParams, true); return DriverManager.getConnection(targetDbUrl, userName, password); } catch (Exception e) { LOG.error("", e); diff --git a/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncReqRow.java b/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncReqRow.java index 6120767ef..2d2f5a72a 100644 --- a/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncReqRow.java +++ b/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncReqRow.java @@ -76,7 +76,7 @@ public void open(Configuration parameters) throws Exception { vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); - setRdbSQLClient(JDBCClient.createNonShared(vertx, mysqlClientConfig)); + setRdbSqlClient(JDBCClient.createNonShared(vertx, mysqlClientConfig)); } } diff --git a/mysql/mysql-sink/src/main/java/com/dtstack/flink/sql/sink/mysql/MysqlSink.java b/mysql/mysql-sink/src/main/java/com/dtstack/flink/sql/sink/mysql/MysqlSink.java index 8c1146ede..0a1749f04 100644 --- a/mysql/mysql-sink/src/main/java/com/dtstack/flink/sql/sink/mysql/MysqlSink.java +++ b/mysql/mysql-sink/src/main/java/com/dtstack/flink/sql/sink/mysql/MysqlSink.java @@ -21,7 +21,7 @@ import 
com.dtstack.flink.sql.sink.IStreamSinkGener; -import com.dtstack.flink.sql.sink.rdb.RdbSink; +import com.dtstack.flink.sql.sink.rdb.AbstractRdbSink; import com.dtstack.flink.sql.sink.rdb.JDBCOptions; import com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; @@ -33,7 +33,7 @@ * @author xuchao */ -public class MysqlSink extends RdbSink implements IStreamSinkGener { +public class MysqlSink extends AbstractRdbSink implements IStreamSinkGener { public MysqlSink() { super(new MySQLDialect()); @@ -42,7 +42,7 @@ public MysqlSink() { @Override public JDBCUpsertOutputFormat getOutputFormat() { JDBCOptions jdbcOptions = JDBCOptions.builder() - .setDBUrl(dbURL) + .setDbUrl(dbUrl) .setDialect(jdbcDialect) .setUsername(userName) .setPassword(password) diff --git a/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllReqRow.java b/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllReqRow.java index 18d9ba045..5477514a5 100644 --- a/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllReqRow.java +++ b/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllReqRow.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.SideTableInfo; -import com.dtstack.flink.sql.side.rdb.all.RdbAllReqRow; +import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.DtStringUtil; import org.apache.flink.api.java.typeutils.RowTypeInfo; import com.google.common.collect.Maps; @@ -36,7 +36,7 @@ /** * side operator with cache for all(period reload) */ -public class OracleAllReqRow extends RdbAllReqRow { +public class OracleAllReqRow extends AbstractRdbAllReqRow { private static final Logger LOG = LoggerFactory.getLogger(OracleAllReqRow.class); @@ -47,11 +47,11 @@ public OracleAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List addParams = Maps.newHashMap(); - String targetDbUrl = DtStringUtil.addJdbcParam(dbURL, addParams, true); + String targetDbUrl = DtStringUtil.addJdbcParam(dbUrl, addParams, true); return DriverManager.getConnection(targetDbUrl, userName, password); } catch (Exception e) { LOG.error("", e); diff --git a/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java b/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java index f973ffee2..f6827f41b 100644 --- a/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java +++ b/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java @@ -68,6 +68,6 @@ public void open(Configuration parameters) throws Exception { vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); - setRdbSQLClient(JDBCClient.createNonShared(vertx, oracleClientConfig)); + setRdbSqlClient(JDBCClient.createNonShared(vertx, oracleClientConfig)); } } diff --git a/oracle/oracle-sink/src/main/java/com/dtstack/flink/sql/sink/oracle/OracleSink.java b/oracle/oracle-sink/src/main/java/com/dtstack/flink/sql/sink/oracle/OracleSink.java index ee0239a1f..2257d2815 100644 --- a/oracle/oracle-sink/src/main/java/com/dtstack/flink/sql/sink/oracle/OracleSink.java +++ b/oracle/oracle-sink/src/main/java/com/dtstack/flink/sql/sink/oracle/OracleSink.java 
@@ -19,7 +19,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.rdb.JDBCOptions; -import com.dtstack.flink.sql.sink.rdb.RdbSink; +import com.dtstack.flink.sql.sink.rdb.AbstractRdbSink; import com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; /** @@ -29,7 +29,7 @@ * * @author maqi */ -public class OracleSink extends RdbSink implements IStreamSinkGener { +public class OracleSink extends AbstractRdbSink implements IStreamSinkGener { public OracleSink() { super(new OracleDialect()); @@ -38,7 +38,7 @@ public OracleSink() { @Override public JDBCUpsertOutputFormat getOutputFormat() { JDBCOptions jdbcOptions = JDBCOptions.builder() - .setDBUrl(dbURL) + .setDbUrl(dbUrl) .setDialect(jdbcDialect) .setUsername(userName) .setPassword(password) diff --git a/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllReqRow.java b/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllReqRow.java index f9e0950dd..a5eec511b 100644 --- a/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllReqRow.java +++ b/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllReqRow.java @@ -1,9 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package com.dtstack.flink.sql.side.polardb; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.SideTableInfo; -import com.dtstack.flink.sql.side.rdb.all.RdbAllReqRow; +import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.DtStringUtil; import com.google.common.collect.Maps; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -15,7 +32,12 @@ import java.util.List; import java.util.Map; -public class PolardbAllReqRow extends RdbAllReqRow { +/** + * Date: 2019/12/20 + * Company: www.dtstack.com + * @author yinxi + */ +public class PolardbAllReqRow extends AbstractRdbAllReqRow { private static final long serialVersionUID = 2098635140857937717L; @@ -28,13 +50,13 @@ public PolardbAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List addParams = Maps.newHashMap(); addParams.put("useCursorFetch", "true"); - String targetDbUrl = DtStringUtil.addJdbcParam(dbURL, addParams, true); + String targetDbUrl = DtStringUtil.addJdbcParam(dbUrl, addParams, true); return DriverManager.getConnection(targetDbUrl, userName, password); } catch (Exception e) { LOG.error("", e); diff --git a/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllSideInfo.java b/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllSideInfo.java index 2802bd402..fa2ad63e7 100644 --- a/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllSideInfo.java +++ b/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllSideInfo.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package com.dtstack.flink.sql.side.polardb; import com.dtstack.flink.sql.side.FieldInfo; @@ -8,6 +25,11 @@ import java.util.List; +/** + * Date: 2019/12/20 + * Company: www.dtstack.com + * @author yinxi + */ public class PolardbAllSideInfo extends RdbAllSideInfo { public PolardbAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); diff --git a/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java b/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java index 3cbb0b368..0f8f82738 100644 --- a/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java +++ b/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package com.dtstack.flink.sql.side.polardb; import com.dtstack.flink.sql.side.FieldInfo; @@ -16,6 +33,11 @@ import java.util.List; +/** + * Date: 2019/12/20 + * Company: www.dtstack.com + * @author yinxi + */ public class PolardbAsyncReqRow extends RdbAsyncReqRow { private static final Logger LOG = LoggerFactory.getLogger(PolardbAsyncReqRow.class); @@ -48,6 +70,6 @@ public void open(Configuration parameters) throws Exception { vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); - setRdbSQLClient(JDBCClient.createNonShared(vertx, mysqlClientConfig)); + setRdbSqlClient(JDBCClient.createNonShared(vertx, mysqlClientConfig)); } } diff --git a/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncSideInfo.java b/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncSideInfo.java index 0b47cf3f7..f6afb8da8 100644 --- a/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncSideInfo.java +++ b/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncSideInfo.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package com.dtstack.flink.sql.side.polardb; import com.dtstack.flink.sql.side.FieldInfo; @@ -8,6 +25,11 @@ import java.util.List; +/** + * Date: 2019/12/20 + * Company: www.dtstack.com + * @author yinxi + */ public class PolardbAsyncSideInfo extends RdbAsyncSideInfo { public PolardbAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { diff --git a/polardb/polardb-side/polardb-side-core/src/main/java/com/dtstack/flink/sql/side/polardb/table/PolardbSideParser.java b/polardb/polardb-side/polardb-side-core/src/main/java/com/dtstack/flink/sql/side/polardb/table/PolardbSideParser.java index 348b3aed9..1afb661f0 100644 --- a/polardb/polardb-side/polardb-side-core/src/main/java/com/dtstack/flink/sql/side/polardb/table/PolardbSideParser.java +++ b/polardb/polardb-side/polardb-side-core/src/main/java/com/dtstack/flink/sql/side/polardb/table/PolardbSideParser.java @@ -1,3 +1,21 @@ + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package com.dtstack.flink.sql.side.polardb.table; import com.dtstack.flink.sql.side.rdb.table.RdbSideParser; @@ -5,6 +23,11 @@ import java.util.Map; +/** + * Date: 2019/12/20 + * Company: www.dtstack.com + * @author yinxi + */ public class PolardbSideParser extends RdbSideParser { private static final String CURR_TYPE = "polardb"; diff --git a/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/PolardbSink.java b/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/PolardbSink.java index 44d558140..c69f31e8f 100644 --- a/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/PolardbSink.java +++ b/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/PolardbSink.java @@ -1,11 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package com.dtstack.flink.sql.sink.polardb; import com.dtstack.flink.sql.sink.rdb.JDBCOptions; -import com.dtstack.flink.sql.sink.rdb.RdbSink; +import com.dtstack.flink.sql.sink.rdb.AbstractRdbSink; import com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; - -public class PolardbSink extends RdbSink { +/** + * Date: 2019/12/20 + * Company: www.dtstack.com + * @author yinxi + */ +public class PolardbSink extends AbstractRdbSink { public PolardbSink() { super(new PolardbDialect()); } @@ -13,7 +35,7 @@ public PolardbSink() { @Override public JDBCUpsertOutputFormat getOutputFormat() { JDBCOptions jdbcOptions = JDBCOptions.builder() - .setDBUrl(dbURL) + .setDbUrl(dbUrl) .setDialect(jdbcDialect) .setUsername(userName) .setPassword(password) diff --git a/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/table/PolardbSinkParser.java b/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/table/PolardbSinkParser.java index 68eead938..a57089941 100644 --- a/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/table/PolardbSinkParser.java +++ b/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/table/PolardbSinkParser.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package com.dtstack.flink.sql.sink.polardb.table; import com.dtstack.flink.sql.sink.rdb.table.RdbSinkParser; @@ -5,6 +22,11 @@ import java.util.Map; +/** + * Date: 2019/12/20 + * Company: www.dtstack.com + * @author yinxi + */ public class PolardbSinkParser extends RdbSinkParser { private static final String CURR_TYPE = "polardb"; diff --git a/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllReqRow.java b/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllReqRow.java index 6d68cfdca..1c9aecfe1 100644 --- a/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllReqRow.java +++ b/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllReqRow.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.SideTableInfo; -import com.dtstack.flink.sql.side.rdb.all.RdbAllReqRow; +import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.DtStringUtil; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.shaded.guava18.com.google.common.collect.Maps; @@ -41,7 +41,7 @@ * @author tcm */ -public class PostgresqlAllReqRow extends RdbAllReqRow { +public class PostgresqlAllReqRow extends AbstractRdbAllReqRow { private static final long serialVersionUID = 2098635140857937717L; @@ -54,13 +54,13 @@ public PostgresqlAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List addParams = Maps.newHashMap(); addParams.put("useCursorFetch", "true"); - String targetDbUrl = DtStringUtil.addJdbcParam(dbURL, addParams, true); + String targetDbUrl = DtStringUtil.addJdbcParam(dbUrl, addParams, true); return DriverManager.getConnection(targetDbUrl, userName, password); } catch (Exception e) { LOG.error("", e); diff --git a/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java b/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java index 02b333819..a26c8b229 100644 --- a/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java +++ b/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java @@ -68,7 +68,7 @@ public void open(Configuration parameters) throws Exception { vo.setEventLoopPoolSize(DEFAULT_VERTX_EVENT_LOOP_POOL_SIZE); vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); Vertx vertx = Vertx.vertx(vo); - setRdbSQLClient(JDBCClient.createNonShared(vertx, pgClientConfig)); + setRdbSqlClient(JDBCClient.createNonShared(vertx, pgClientConfig)); } } diff --git a/postgresql/postgresql-sink/src/main/java/com/dtstack/flink/sql/sink/postgresql/PostgresqlSink.java b/postgresql/postgresql-sink/src/main/java/com/dtstack/flink/sql/sink/postgresql/PostgresqlSink.java index cf45a6bd9..bb731152c 100644 --- a/postgresql/postgresql-sink/src/main/java/com/dtstack/flink/sql/sink/postgresql/PostgresqlSink.java +++ b/postgresql/postgresql-sink/src/main/java/com/dtstack/flink/sql/sink/postgresql/PostgresqlSink.java @@ -22,13 +22,13 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.rdb.JDBCOptions; -import 
com.dtstack.flink.sql.sink.rdb.RdbSink; +import com.dtstack.flink.sql.sink.rdb.AbstractRdbSink; import com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; /** * @author maqi */ -public class PostgresqlSink extends RdbSink implements IStreamSinkGener { +public class PostgresqlSink extends AbstractRdbSink implements IStreamSinkGener { public PostgresqlSink() { super(new PostgresqlDialect()); } @@ -36,7 +36,7 @@ public PostgresqlSink() { @Override public JDBCUpsertOutputFormat getOutputFormat() { JDBCOptions jdbcOptions = JDBCOptions.builder() - .setDBUrl(dbURL) + .setDbUrl(dbUrl) .setDialect(jdbcDialect) .setUsername(userName) .setPassword(password) diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllReqRow.java deleted file mode 100644 index 48d2407aa..000000000 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllReqRow.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.dtstack.flink.sql.side.rdb.all; - -import com.dtstack.flink.sql.side.AllReqRow; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; -import com.dtstack.flink.sql.side.rdb.util.SwitchUtil; -import org.apache.calcite.sql.JoinType; -import org.apache.commons.collections.CollectionUtils; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import org.apache.flink.table.runtime.types.CRow; -import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo; -import org.apache.flink.types.Row; -import org.apache.flink.util.Collector; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.util.Calendar; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; - -/** - * side operator with cache for all(period reload) - * Date: 2018/11/26 - * Company: www.dtstack.com - * - * @author maqi - */ - -public abstract class RdbAllReqRow extends AllReqRow { - - private static final long serialVersionUID = 2098635140857937718L; - - private static final Logger LOG = LoggerFactory.getLogger(RdbAllReqRow.class); - - private static final int CONN_RETRY_NUM = 3; - - private AtomicReference>>> cacheRef = new AtomicReference<>(); - - public RdbAllReqRow(SideInfo sideInfo) { - super(sideInfo); - } - - @Override - public Row fillData(Row input, Object sideInput) { - Map cacheInfo = (Map) sideInput; - Row row = new Row(sideInfo.getOutFieldInfoList().size()); - for (Map.Entry entry : sideInfo.getInFieldIndex().entrySet()) { - Object obj = input.getField(entry.getValue()); - boolean isTimeIndicatorTypeInfo = TimeIndicatorTypeInfo.class.isAssignableFrom(sideInfo.getRowTypeInfo().getTypeAt(entry.getValue()).getClass()); - - //Type information for indicating event or processing time. However, it behaves like a regular SQL timestamp but is serialized as Long. 
- if (obj instanceof Timestamp && isTimeIndicatorTypeInfo) { - obj = ((Timestamp) obj).getTime(); - } - - row.setField(entry.getKey(), obj); - } - - for (Map.Entry entry : sideInfo.getSideFieldNameIndex().entrySet()) { - if (cacheInfo == null) { - row.setField(entry.getKey(), null); - } else { - row.setField(entry.getKey(), cacheInfo.get(entry.getValue())); - } - } - - return row; - } - - @Override - protected void initCache() throws SQLException { - Map>> newCache = Maps.newConcurrentMap(); - cacheRef.set(newCache); - loadData(newCache); - } - - @Override - protected void reloadCache() { - //reload cacheRef and replace to old cacheRef - Map>> newCache = Maps.newConcurrentMap(); - try { - loadData(newCache); - } catch (SQLException e) { - LOG.error("", e); - } - - cacheRef.set(newCache); - LOG.info("----- rdb all cacheRef reload end:{}", Calendar.getInstance()); - } - - - @Override - public void flatMap(CRow value, Collector out) throws Exception { - List inputParams = Lists.newArrayList(); - for (Integer conValIndex : sideInfo.getEqualValIndex()) { - Object equalObj = value.row().getField(conValIndex); - if (equalObj == null) { - if (sideInfo.getJoinType() == JoinType.LEFT) { - Row row = fillData(value.row(), null); - out.collect(new CRow(row, value.change())); - } - return; - } - inputParams.add(equalObj); - } - - String key = buildKey(inputParams); - List> cacheList = cacheRef.get().get(key); - if (CollectionUtils.isEmpty(cacheList)) { - if (sideInfo.getJoinType() == JoinType.LEFT) { - Row row = fillData(value.row(), null); - out.collect(new CRow(row, value.change())); - } else { - return; - } - - return; - } - - for (Map one : cacheList) { - out.collect(new CRow(fillData(value.row(), one), value.change())); - } - } - - private String buildKey(List equalValList) { - StringBuilder sb = new StringBuilder(""); - for (Object equalVal : equalValList) { - sb.append(equalVal).append("_"); - } - - return sb.toString(); - } - - private String buildKey(Map val, List equalFieldList) { - StringBuilder sb = new StringBuilder(""); - for (String equalField : equalFieldList) { - sb.append(val.get(equalField)).append("_"); - } - - return sb.toString(); - } - - public abstract Connection getConn(String dbURL, String userName, String password); - - - private void loadData(Map>> tmpCache) throws SQLException { - RdbSideTableInfo tableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); - Connection connection = null; - - try { - for (int i = 0; i < CONN_RETRY_NUM; i++) { - try { - connection = getConn(tableInfo.getUrl(), tableInfo.getUserName(), tableInfo.getPassword()); - break; - } catch (Exception e) { - if (i == CONN_RETRY_NUM - 1) { - throw new RuntimeException("", e); - } - try { - String connInfo = "url:" + tableInfo.getUrl() + ";userName:" + tableInfo.getUserName() + ",pwd:" + tableInfo.getPassword(); - LOG.warn("get conn fail, wait for 5 sec and try again, connInfo:" + connInfo); - Thread.sleep(5 * 1000); - } catch (InterruptedException e1) { - LOG.error("", e1); - } - } - - } - - //load data from table - String sql = sideInfo.getSqlCondition(); - Statement statement = connection.createStatement(); - statement.setFetchSize(getFetchSize()); - ResultSet resultSet = statement.executeQuery(sql); - String[] sideFieldNames = sideInfo.getSideSelectFields().split(","); - String[] fields = sideInfo.getSideTableInfo().getFieldTypes(); - while (resultSet.next()) { - Map oneRow = Maps.newHashMap(); - for (String fieldName : sideFieldNames) { - Object object = resultSet.getObject(fieldName.trim()); - int 
fieldIndex = sideInfo.getSideTableInfo().getFieldList().indexOf(fieldName.trim()); - object = SwitchUtil.getTarget(object, fields[fieldIndex]); - oneRow.put(fieldName.trim(), object); - } - - String cacheKey = buildKey(oneRow, sideInfo.getEqualFieldList()); - List> list = tmpCache.computeIfAbsent(cacheKey, key -> Lists.newArrayList()); - list.add(oneRow); - } - } catch (Exception e) { - LOG.error("", e); - } finally { - if (connection != null) { - connection.close(); - } - } - } - - public int getFetchSize() { - return 1000; - } - -} diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java index 7123e257b..bf7f8f774 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java @@ -20,16 +20,12 @@ package com.dtstack.flink.sql.side.rdb.async; import com.dtstack.flink.sql.enums.ECacheContentType; -import com.dtstack.flink.sql.side.*; +import com.dtstack.flink.sql.side.AsyncReqRow; +import com.dtstack.flink.sql.side.CacheMissVal; +import com.dtstack.flink.sql.side.SideInfo; import com.dtstack.flink.sql.side.cache.CacheObj; import com.dtstack.flink.sql.side.rdb.util.SwitchUtil; -import io.vertx.core.AsyncResult; -import io.vertx.core.Handler; -import io.vertx.core.Vertx; -import io.vertx.core.VertxOptions; import io.vertx.core.json.JsonArray; -import io.vertx.core.json.JsonObject; -import io.vertx.ext.jdbc.JDBCClient; import io.vertx.ext.sql.SQLClient; import io.vertx.ext.sql.SQLConnection; import com.google.common.collect.Lists; @@ -41,8 +37,6 @@ import org.slf4j.LoggerFactory; import java.sql.Timestamp; -import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -73,7 +67,7 @@ public class RdbAsyncReqRow extends AsyncReqRow { public final static String PREFERRED_TEST_QUERY_SQL = "select 1 from dual"; - private transient SQLClient rdbSQLClient; + private transient SQLClient rdbSqlClient; public RdbAsyncReqRow(SideInfo sideInfo) { super(sideInfo); @@ -113,7 +107,7 @@ public void asyncInvoke(CRow input, ResultFuture resultFuture) throws Exce } } - rdbSQLClient.getConnection(conn -> { + rdbSqlClient.getConnection(conn -> { if (conn.failed()) { //Treatment failures resultFuture.completeExceptionally(conn.cause()); @@ -195,8 +189,8 @@ public Row fillData(Row input, Object line) { @Override public void close() throws Exception { super.close(); - if (rdbSQLClient != null) { - rdbSQLClient.close(); + if (rdbSqlClient != null) { + rdbSqlClient.close(); } } @@ -211,8 +205,8 @@ public String buildCacheKey(JsonArray jsonArray) { return sb.toString(); } - public void setRdbSQLClient(SQLClient rdbSQLClient) { - this.rdbSQLClient = rdbSQLClient; + public void setRdbSqlClient(SQLClient rdbSqlClient) { + this.rdbSqlClient = rdbSqlClient; } } diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/provider/DTC3P0DataSourceProvider.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/provider/DTC3P0DataSourceProvider.java index fcca4f0ef..542258257 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/provider/DTC3P0DataSourceProvider.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/provider/DTC3P0DataSourceProvider.java @@ -25,13 +25,19 @@ import javax.sql.DataSource; import java.beans.PropertyVetoException; import java.sql.SQLException; - +/** + * 
Date: 2019/9/17 + * Company: www.dtstack.com + * @author maqi + */ public class DTC3P0DataSourceProvider extends C3P0DataSourceProvider { @Override public DataSource getDataSource(JsonObject config) throws SQLException { String url = config.getString("url"); - if (url == null) throw new NullPointerException("url cannot be null"); + if (url == null) { + throw new NullPointerException("url cannot be null"); + } String driverClass = config.getString("driver_class"); String user = config.getString("user"); String password = config.getString("password"); diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/util/SwitchUtil.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/util/SwitchUtil.java index 1205c5521..49d163d4d 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/util/SwitchUtil.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/util/SwitchUtil.java @@ -78,6 +78,7 @@ public static Object getTarget(Object obj, String targetType) { case "timestamp": case "datetime": return MathUtil.getTimestamp(obj); + default: } return obj; } diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/RdbSink.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/AbstractRdbSink.java similarity index 90% rename from rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/RdbSink.java rename to rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/AbstractRdbSink.java index c65696903..b7990f9f4 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/RdbSink.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/AbstractRdbSink.java @@ -21,7 +21,6 @@ import com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; import com.dtstack.flink.sql.sink.rdb.table.RdbTableInfo; import com.dtstack.flink.sql.table.TargetTableInfo; -import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -34,10 +33,6 @@ import com.dtstack.flink.sql.sink.rdb.dialect.JDBCDialect; import java.io.Serializable; -import java.math.BigDecimal; -import java.sql.Date; -import java.sql.Timestamp; -import java.sql.Types; import java.util.Arrays; import java.util.List; @@ -48,11 +43,8 @@ * * @author maqi */ -public abstract class RdbSink implements RetractStreamTableSink, Serializable, IStreamSinkGener { - - protected String driverName; - - protected String dbURL; +public abstract class AbstractRdbSink implements RetractStreamTableSink, Serializable, IStreamSinkGener { + protected String dbUrl; protected String userName; @@ -88,18 +80,18 @@ public abstract class RdbSink implements RetractStreamTableSink, Serializab protected String updateMode; - public RdbSink(JDBCDialect jdbcDialect) { + public AbstractRdbSink(JDBCDialect jdbcDialect) { this.jdbcDialect = jdbcDialect; } @Override - public RdbSink genStreamSink(TargetTableInfo targetTableInfo) { + public AbstractRdbSink genStreamSink(TargetTableInfo targetTableInfo) { RdbTableInfo rdbTableInfo = (RdbTableInfo) targetTableInfo; this.batchNum = rdbTableInfo.getBatchSize() == null ? batchNum : rdbTableInfo.getBatchSize(); this.batchWaitInterval = rdbTableInfo.getBatchWaitInterval() == null ? batchWaitInterval : rdbTableInfo.getBatchWaitInterval(); this.parallelism = rdbTableInfo.getParallelism() == null ? 
parallelism : rdbTableInfo.getParallelism(); - this.dbURL = rdbTableInfo.getUrl(); + this.dbUrl = rdbTableInfo.getUrl(); this.userName = rdbTableInfo.getUserName(); this.password = rdbTableInfo.getPassword(); this.tableName = rdbTableInfo.getTableName(); diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/JDBCOptions.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/JDBCOptions.java index e9a4f391f..f476bfadf 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/JDBCOptions.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/JDBCOptions.java @@ -29,7 +29,7 @@ public class JDBCOptions { - private String dbURL; + private String dbUrl; private String tableName; private String driverName; private String username; @@ -37,9 +37,9 @@ public class JDBCOptions { private String schema; private JDBCDialect dialect; - private JDBCOptions(String dbURL, String tableName, String driverName, String username, + private JDBCOptions(String dbUrl, String tableName, String driverName, String username, String password, String schema, JDBCDialect dialect) { - this.dbURL = dbURL; + this.dbUrl = dbUrl; this.tableName = tableName; this.driverName = driverName; this.username = username; @@ -48,8 +48,8 @@ private JDBCOptions(String dbURL, String tableName, String driverName, String us this.dialect = dialect; } - public String getDbURL() { - return dbURL; + public String getDbUrl() { + return dbUrl; } public String getTableName() { @@ -84,7 +84,7 @@ public static Builder builder() { public boolean equals(Object o) { if (o instanceof JDBCOptions) { JDBCOptions options = (JDBCOptions) o; - return Objects.equals(dbURL, options.dbURL) && + return Objects.equals(dbUrl, options.dbUrl) && Objects.equals(tableName, options.tableName) && Objects.equals(driverName, options.driverName) && Objects.equals(username, options.username) && @@ -100,7 +100,7 @@ public boolean equals(Object o) { * Builder of {@link JDBCOptions}. */ public static class Builder { - private String dbURL; + private String dbUrl; private String tableName; private String driverName; private String username; @@ -152,8 +152,8 @@ public Builder setSchema(String schema) { /** * required, JDBC DB url. 
*/ - public Builder setDBUrl(String dbURL) { - this.dbURL = dbURL; + public Builder setDbUrl(String dbUrl) { + this.dbUrl = dbUrl; return this; } @@ -163,7 +163,7 @@ public Builder setDialect(JDBCDialect dialect) { } public JDBCOptions build() { - checkNotNull(dbURL, "No dbURL supplied."); + checkNotNull(dbUrl, "No dbURL supplied."); checkNotNull(tableName, "No tableName supplied."); if (this.driverName == null) { @@ -173,7 +173,7 @@ public JDBCOptions build() { }); } - return new JDBCOptions(dbURL, tableName, driverName, username, password, schema, dialect); + return new JDBCOptions(dbUrl, tableName, driverName, username, password, schema, dialect); } } } diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/AbstractJDBCOutputFormat.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/AbstractJDBCOutputFormat.java index 73f17d8fd..3378ff3f2 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/AbstractJDBCOutputFormat.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/AbstractJDBCOutputFormat.java @@ -47,16 +47,16 @@ public abstract class AbstractJDBCOutputFormat extends DtRichOutputFormat protected final String username; protected final String password; - private final String drivername; + private final String driverName; protected final String dbURL; protected transient Connection connection; - public AbstractJDBCOutputFormat(String username, String password, String drivername, String dbURL) { + public AbstractJDBCOutputFormat(String username, String password, String driverName, String dbUrl) { this.username = username; this.password = password; - this.drivername = drivername; - this.dbURL = dbURL; + this.driverName = driverName; + this.dbURL = dbUrl; } @Override @@ -64,7 +64,7 @@ public void configure(Configuration parameters) { } protected void establishConnection() throws SQLException, ClassNotFoundException, IOException { - JDBCUtils.forName(drivername, getClass().getClassLoader()); + JDBCUtils.forName(driverName, getClass().getClassLoader()); if (username == null) { connection = DriverManager.getConnection(dbURL); } else { diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/JDBCUpsertOutputFormat.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/JDBCUpsertOutputFormat.java index ff7b24979..b13b39537 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/JDBCUpsertOutputFormat.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/JDBCUpsertOutputFormat.java @@ -25,7 +25,7 @@ import com.dtstack.flink.sql.sink.rdb.dialect.JDBCDialect; import com.dtstack.flink.sql.sink.rdb.writer.AppendOnlyWriter; import com.dtstack.flink.sql.sink.rdb.writer.JDBCWriter; -import com.dtstack.flink.sql.sink.rdb.writer.UpsertWriter; +import com.dtstack.flink.sql.sink.rdb.writer.AbstractUpsertWriter; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.types.Row; @@ -44,6 +44,7 @@ /** * An upsert OutputFormat for JDBC. 
+ * @author maqi */ public class JDBCUpsertOutputFormat extends AbstractJDBCOutputFormat> { @@ -83,7 +84,7 @@ public JDBCUpsertOutputFormat( long flushIntervalMills, boolean allReplace, String updateMode) { - super(options.getUsername(), options.getPassword(), options.getDriverName(), options.getDbURL()); + super(options.getUsername(), options.getPassword(), options.getDriverName(), options.getDbUrl()); this.schema = options.getSchema(); this.tableName = options.getTableName(); this.dialect = options.getDialect(); @@ -111,12 +112,12 @@ public void open(int taskNumber, int numTasks) throws IOException { initMetric(); if (StringUtils.equalsIgnoreCase(updateMode, EUpdateMode.APPEND.name()) || keyFields == null || keyFields.length == 0) { - String insertSQL = dialect.getInsertIntoStatement(schema, tableName, fieldNames, partitionFields); - LOG.info("execute insert sql: {}", insertSQL); - System.out.println("execute insert sql :" + insertSQL); - jdbcWriter = new AppendOnlyWriter(insertSQL, fieldTypes, this); + String insertSql = dialect.getInsertIntoStatement(schema, tableName, fieldNames, partitionFields); + LOG.info("execute insert sql: {}", insertSql); + System.out.println("execute insert sql :" + insertSql); + jdbcWriter = new AppendOnlyWriter(insertSql, fieldTypes, this); } else { - jdbcWriter = UpsertWriter.create( + jdbcWriter = AbstractUpsertWriter.create( dialect, schema, tableName, fieldNames, fieldTypes, keyFields, partitionFields, getRuntimeContext().getExecutionConfig().isObjectReuseEnabled(), allReplace, this); } diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java index 59eee0858..2aa7b01f9 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java @@ -34,6 +34,7 @@ /** * Just append record to jdbc, can not receive retract/delete message. 
+ * @author maqi */ public class AppendOnlyWriter implements JDBCWriter { @@ -41,7 +42,7 @@ public class AppendOnlyWriter implements JDBCWriter { private static final Logger LOG = LoggerFactory.getLogger(AppendOnlyWriter.class); - private final String insertSQL; + private final String insertSql; private final int[] fieldTypes; private transient PreparedStatement statement; @@ -49,8 +50,8 @@ public class AppendOnlyWriter implements JDBCWriter { // only use metric private transient DtRichOutputFormat metricOutputFormat; - public AppendOnlyWriter(String insertSQL, int[] fieldTypes, DtRichOutputFormat metricOutputFormat) { - this.insertSQL = insertSQL; + public AppendOnlyWriter(String insertSql, int[] fieldTypes, DtRichOutputFormat metricOutputFormat) { + this.insertSql = insertSql; this.fieldTypes = fieldTypes; this.metricOutputFormat = metricOutputFormat; } @@ -58,7 +59,7 @@ public AppendOnlyWriter(String insertSQL, int[] fieldTypes, DtRichOutputFormat m @Override public void open(Connection connection) throws SQLException { this.rows = new ArrayList(); - this.statement = connection.prepareStatement(insertSQL); + this.statement = connection.prepareStatement(insertSql); } /** diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/UpsertWriter.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/UpsertWriter.java deleted file mode 100644 index c7074de59..000000000 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/UpsertWriter.java +++ /dev/null @@ -1,338 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.dtstack.flink.sql.sink.rdb.writer; - -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; -import com.dtstack.flink.sql.sink.rdb.dialect.JDBCDialect; -import org.apache.flink.api.java.tuple.Tuple2; -import org.apache.flink.types.Row; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.function.Function; - -import static com.dtstack.flink.sql.sink.rdb.JDBCTypeConvertUtils.setRecordToStatement; -import static org.apache.flink.util.Preconditions.checkNotNull; - -/** - * Upsert writer to deal with upsert, delete message.dd - */ -public abstract class UpsertWriter implements JDBCWriter { - - private static final long serialVersionUID = 1L; - private static final Logger LOG = LoggerFactory.getLogger(UpsertWriter.class); - - - public static UpsertWriter create( - JDBCDialect dialect, - String schema, - String tableName, - String[] fieldNames, - int[] fieldTypes, - String[] keyFields, - String[] partitionFields, - boolean objectReuse, - boolean allReplace, - DtRichOutputFormat metricOutputFormat) { - - checkNotNull(keyFields); - - List nameList = Arrays.asList(fieldNames); - int[] pkFields = Arrays.stream(keyFields).mapToInt(nameList::indexOf).toArray(); - int[] pkTypes = fieldTypes == null ? null : - Arrays.stream(pkFields).map(f -> fieldTypes[f]).toArray(); - - String deleteSQL = dialect.getDeleteStatement(schema, tableName, keyFields); - LOG.info("deleteSQL is :{}", deleteSQL); - System.out.println("deleteSQL is :" + deleteSQL); - - Optional upsertSQL = dialect.getUpsertStatement(schema, tableName, fieldNames, keyFields, allReplace); - LOG.info("execute UpsertStatement: {}", upsertSQL.orElse("use UsingInsertUpdateStatement")); - System.out.println("execute UpsertStatement: " + upsertSQL.orElse("use UsingInsertUpdateStatement")); - - return upsertSQL.map((Function) sql -> - new UpsertWriterUsingUpsertStatement( - fieldTypes, pkFields, pkTypes, objectReuse, deleteSQL, sql, metricOutputFormat)) - .orElseGet(() -> - new UpsertWriterUsingInsertUpdateStatement( - fieldTypes, pkFields, pkTypes, objectReuse, deleteSQL, - dialect.getRowExistsStatement(tableName, keyFields), - dialect.getInsertIntoStatement(schema, tableName, fieldNames, partitionFields), - dialect.getUpdateStatement(tableName, fieldNames, keyFields), - metricOutputFormat)); - } - - final int[] fieldTypes; - final int[] pkTypes; - private final int[] pkFields; - private final String deleteSQL; - private final boolean objectReuse; - - private transient Map> keyToRows; - private transient PreparedStatement deleteStatement; - // only use metric - private transient DtRichOutputFormat metricOutputFormat; - - private UpsertWriter(int[] fieldTypes, int[] pkFields, int[] pkTypes, String deleteSQL, boolean objectReuse, DtRichOutputFormat metricOutputFormat) { - this.fieldTypes = fieldTypes; - this.pkFields = pkFields; - this.pkTypes = pkTypes; - this.deleteSQL = deleteSQL; - this.objectReuse = objectReuse; - this.metricOutputFormat = metricOutputFormat; - } - - @Override - public void open(Connection connection) throws SQLException { - this.keyToRows = new HashMap<>(); - this.deleteStatement = connection.prepareStatement(deleteSQL); - } - - @Override - public void addRecord(Tuple2 record) throws SQLException { - // we don't need perform a deep 
copy, because jdbc field are immutable object. - Tuple2 tuple2 = objectReuse ? new Tuple2<>(record.f0, Row.copy(record.f1)) : record; - // add records to buffer - keyToRows.put(getPrimaryKey(tuple2.f1), tuple2); - } - - @Override - public void executeBatch(Connection connection) throws SQLException { - try { - if (keyToRows.size() > 0) { - for (Map.Entry> entry : keyToRows.entrySet()) { - Row pk = entry.getKey(); - Tuple2 tuple = entry.getValue(); - if (tuple.f0) { - processOneRowInBatch(pk, tuple.f1); - } else { - setRecordToStatement(deleteStatement, pkTypes, pk); - deleteStatement.addBatch(); - } - } - internalExecuteBatch(); - deleteStatement.executeBatch(); - connection.commit(); - keyToRows.clear(); - } - } catch (Exception e) { - // 清理批处理中的正确字段,防止重复写入 - connection.rollback(); - connection.commit(); - cleanBatchWhenError(); - executeUpdate(connection); - } - } - - @Override - public void executeUpdate(Connection connection) throws SQLException { - if (keyToRows.size() > 0) { - for (Map.Entry> entry : keyToRows.entrySet()) { - try { - Row pk = entry.getKey(); - Tuple2 tuple = entry.getValue(); - if (tuple.f0) { - processOneRowInBatch(pk, tuple.f1); - internalExecuteBatch(); - } else { - setRecordToStatement(deleteStatement, pkTypes, pk); - deleteStatement.executeUpdate(); - } - connection.commit(); - } catch (Exception e) { - System.out.println(e.getCause()); - // deal pg error: current transaction is aborted, commands ignored until end of transaction block - connection.rollback(); - connection.commit(); - if (metricOutputFormat.outDirtyRecords.getCount() % DIRTYDATA_PRINT_FREQUENTY == 0 || LOG.isDebugEnabled()) { - LOG.error("record insert failed ,this row is {}", entry.getValue()); - LOG.error("", e); - } - metricOutputFormat.outDirtyRecords.inc(); - } - } - keyToRows.clear(); - } - } - - abstract void processOneRowInBatch(Row pk, Row row) throws SQLException; - - abstract void internalExecuteBatch() throws SQLException; - - @Override - public void close() throws SQLException { - if (deleteStatement != null) { - deleteStatement.close(); - deleteStatement = null; - } - } - - private Row getPrimaryKey(Row row) { - Row pks = new Row(pkFields.length); - for (int i = 0; i < pkFields.length; i++) { - pks.setField(i, row.getField(pkFields[i])); - } - return pks; - } - - // ---------------------------------------------------------------------------------------- - - private static final class UpsertWriterUsingUpsertStatement extends UpsertWriter { - - private static final long serialVersionUID = 1L; - private final String upsertSQL; - - private transient PreparedStatement upsertStatement; - - private UpsertWriterUsingUpsertStatement( - int[] fieldTypes, - int[] pkFields, - int[] pkTypes, - boolean objectReuse, - String deleteSQL, - String upsertSQL, - DtRichOutputFormat metricOutputFormat) { - super(fieldTypes, pkFields, pkTypes, deleteSQL, objectReuse, metricOutputFormat); - this.upsertSQL = upsertSQL; - } - - @Override - public void open(Connection connection) throws SQLException { - super.open(connection); - upsertStatement = connection.prepareStatement(upsertSQL); - } - - @Override - void processOneRowInBatch(Row pk, Row row) throws SQLException { - setRecordToStatement(upsertStatement, fieldTypes, row); - upsertStatement.addBatch(); - } - - @Override - public void cleanBatchWhenError() throws SQLException { - upsertStatement.clearBatch(); - upsertStatement.clearParameters(); - } - - @Override - void internalExecuteBatch() throws SQLException { - upsertStatement.executeBatch(); - } - - 
@Override - public void close() throws SQLException { - super.close(); - if (upsertStatement != null) { - upsertStatement.close(); - upsertStatement = null; - } - } - } - - private static final class UpsertWriterUsingInsertUpdateStatement extends UpsertWriter { - - private static final long serialVersionUID = 1L; - private final String existSQL; - private final String insertSQL; - private final String updateSQL; - - private transient PreparedStatement existStatement; - private transient PreparedStatement insertStatement; - private transient PreparedStatement updateStatement; - - private UpsertWriterUsingInsertUpdateStatement( - int[] fieldTypes, - int[] pkFields, - int[] pkTypes, - boolean objectReuse, - String deleteSQL, - String existSQL, - String insertSQL, - String updateSQL, - DtRichOutputFormat metricOutputFormat) { - super(fieldTypes, pkFields, pkTypes, deleteSQL, objectReuse, metricOutputFormat); - this.existSQL = existSQL; - this.insertSQL = insertSQL; - this.updateSQL = updateSQL; - } - - @Override - public void open(Connection connection) throws SQLException { - super.open(connection); - existStatement = connection.prepareStatement(existSQL); - insertStatement = connection.prepareStatement(insertSQL); - updateStatement = connection.prepareStatement(updateSQL); - } - - @Override - void processOneRowInBatch(Row pk, Row row) throws SQLException { - setRecordToStatement(existStatement, pkTypes, pk); - ResultSet resultSet = existStatement.executeQuery(); - boolean exist = resultSet.next(); - resultSet.close(); - if (exist) { - // do update - setRecordToStatement(updateStatement, fieldTypes, row); - updateStatement.addBatch(); - } else { - // do insert - setRecordToStatement(insertStatement, fieldTypes, row); - insertStatement.addBatch(); - } - } - - @Override - public void cleanBatchWhenError() throws SQLException { - updateStatement.clearBatch(); - insertStatement.clearBatch(); - } - - @Override - void internalExecuteBatch() throws SQLException { - updateStatement.executeBatch(); - insertStatement.executeBatch(); - } - - @Override - public void close() throws SQLException { - super.close(); - if (existStatement != null) { - existStatement.close(); - existStatement = null; - } - if (insertStatement != null) { - insertStatement.close(); - insertStatement = null; - } - if (updateStatement != null) { - updateStatement.close(); - updateStatement = null; - } - } - } -} diff --git a/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllReqRow.java b/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllReqRow.java index 961539fde..af1d94171 100644 --- a/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllReqRow.java +++ b/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllReqRow.java @@ -21,21 +21,26 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.SideTableInfo; -import com.dtstack.flink.sql.side.rdb.all.RdbAllReqRow; +import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.DtStringUtil; import org.apache.flink.api.java.typeutils.RowTypeInfo; import com.google.common.collect.Maps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import java.sql.Connection; import java.sql.DriverManager; import java.util.List; import java.util.Map; + /** - * side 
operator with cache for all(period reload) + * side operator with cache for all(period reload) + * Date: 2019/11/26 + * Company: www.dtstack.com + * @author maqi */ -public class SqlserverAllReqRow extends RdbAllReqRow { +public class SqlserverAllReqRow extends AbstractRdbAllReqRow { private static final Logger LOG = LoggerFactory.getLogger(SqlserverAllReqRow.class); @@ -46,13 +51,12 @@ public SqlserverAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List addParams = Maps.newHashMap(); - //addParams.put("useCursorFetch", "true"); - String targetDbUrl = DtStringUtil.addJdbcParam(dbURL, addParams, true); + String targetDbUrl = DtStringUtil.addJdbcParam(dbUrl, addParams, true); return DriverManager.getConnection(targetDbUrl, userName, password); } catch (Exception e) { LOG.error("", e); diff --git a/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllSideInfo.java b/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllSideInfo.java index 2b9913e7e..8f0fc63a4 100644 --- a/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllSideInfo.java +++ b/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllSideInfo.java @@ -24,7 +24,11 @@ import com.dtstack.flink.sql.side.rdb.all.RdbAllSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; import java.util.List; - +/** + * Date: 2019/11/26 + * Company: www.dtstack.com + * @author maqi + */ public class SqlserverAllSideInfo extends RdbAllSideInfo { public SqlserverAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { diff --git a/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java b/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java index cb4240ca6..070bfe334 100644 --- a/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java +++ b/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java @@ -33,7 +33,11 @@ import java.util.List; - +/** + * Date: 2019/11/26 + * Company: www.dtstack.com + * @author maqi + */ public class SqlserverAsyncReqRow extends RdbAsyncReqRow { private static final Logger LOG = LoggerFactory.getLogger(SqlserverAsyncReqRow.class); @@ -67,6 +71,6 @@ public void open(Configuration parameters) throws Exception { vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); - setRdbSQLClient(JDBCClient.createNonShared(vertx, sqlserverClientConfig)); + setRdbSqlClient(JDBCClient.createNonShared(vertx, sqlserverClientConfig)); } } diff --git a/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncSideInfo.java b/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncSideInfo.java index 347ca9bf4..d3c833c96 100644 --- a/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncSideInfo.java +++ b/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncSideInfo.java @@ -23,10 +23,15 @@ import 
com.dtstack.flink.sql.side.SideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; -import java.util.List; +import java.util.List; +/** + * Date: 2019/11/26 + * Company: www.dtstack.com + * @author maqi + */ public class SqlserverAsyncSideInfo extends RdbAsyncSideInfo { public SqlserverAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { diff --git a/sqlserver/sqlserver-side/sqlserver-side-core/src/main/java/com/dtstack/flink/sql/side/sqlserver/table/SqlserverSideParser.java b/sqlserver/sqlserver-side/sqlserver-side-core/src/main/java/com/dtstack/flink/sql/side/sqlserver/table/SqlserverSideParser.java index 3631dcd8a..be8ebb152 100644 --- a/sqlserver/sqlserver-side/sqlserver-side-core/src/main/java/com/dtstack/flink/sql/side/sqlserver/table/SqlserverSideParser.java +++ b/sqlserver/sqlserver-side/sqlserver-side-core/src/main/java/com/dtstack/flink/sql/side/sqlserver/table/SqlserverSideParser.java @@ -21,7 +21,11 @@ import com.dtstack.flink.sql.table.TableInfo; import java.util.Map; - +/** + * Date: 2019/11/26 + * Company: www.dtstack.com + * @author maqi + */ public class SqlserverSideParser extends RdbSideParser { private static final String CURR_TYPE = "sqlserver"; diff --git a/sqlserver/sqlserver-sink/src/main/java/com/dtstack/flink/sql/sink/sqlserver/SqlserverSink.java b/sqlserver/sqlserver-sink/src/main/java/com/dtstack/flink/sql/sink/sqlserver/SqlserverSink.java index dbbeb820e..3c331cdd0 100644 --- a/sqlserver/sqlserver-sink/src/main/java/com/dtstack/flink/sql/sink/sqlserver/SqlserverSink.java +++ b/sqlserver/sqlserver-sink/src/main/java/com/dtstack/flink/sql/sink/sqlserver/SqlserverSink.java @@ -19,7 +19,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.rdb.JDBCOptions; -import com.dtstack.flink.sql.sink.rdb.RdbSink; +import com.dtstack.flink.sql.sink.rdb.AbstractRdbSink; import com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; /** @@ -29,7 +29,7 @@ * * @author maqi */ -public class SqlserverSink extends RdbSink implements IStreamSinkGener { +public class SqlserverSink extends AbstractRdbSink implements IStreamSinkGener { public SqlserverSink() { super(new SqlserverDialect()); @@ -38,7 +38,7 @@ public SqlserverSink() { @Override public JDBCUpsertOutputFormat getOutputFormat() { JDBCOptions jdbcOptions = JDBCOptions.builder() - .setDBUrl(dbURL) + .setDbUrl(dbUrl) .setDialect(jdbcDialect) .setUsername(userName) .setPassword(password) From 36549fb01ed85eefa816e2de7c9a1adf46245e2f Mon Sep 17 00:00:00 2001 From: maqi Date: Tue, 25 Feb 2020 15:58:56 +0800 Subject: [PATCH 04/47] rename --- .../side/rdb/all/AbstractRdbAllReqRow.java | 228 ++++++++++++ .../sink/rdb/writer/AbstractUpsertWriter.java | 339 ++++++++++++++++++ 2 files changed, 567 insertions(+) create mode 100644 rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java create mode 100644 rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java new file mode 100644 index 000000000..aeb88f7a3 --- /dev/null +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.dtstack.flink.sql.side.rdb.all; + +import com.dtstack.flink.sql.side.AllReqRow; +import com.dtstack.flink.sql.side.SideInfo; +import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; +import com.dtstack.flink.sql.side.rdb.util.SwitchUtil; +import org.apache.calcite.sql.JoinType; +import org.apache.commons.collections.CollectionUtils; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo; +import org.apache.flink.types.Row; +import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +/** + * side operator with cache for all(period reload) + * Date: 2018/11/26 + * Company: www.dtstack.com + * + * @author maqi + */ + +public abstract class AbstractRdbAllReqRow extends AllReqRow { + + private static final long serialVersionUID = 2098635140857937718L; + + private static final Logger LOG = LoggerFactory.getLogger(AbstractRdbAllReqRow.class); + + private static final int CONN_RETRY_NUM = 3; + + private AtomicReference>>> cacheRef = new AtomicReference<>(); + + public AbstractRdbAllReqRow(SideInfo sideInfo) { + super(sideInfo); + } + + @Override + public Row fillData(Row input, Object sideInput) { + Map cacheInfo = (Map) sideInput; + Row row = new Row(sideInfo.getOutFieldInfoList().size()); + for (Map.Entry entry : sideInfo.getInFieldIndex().entrySet()) { + Object obj = input.getField(entry.getValue()); + boolean isTimeIndicatorTypeInfo = TimeIndicatorTypeInfo.class.isAssignableFrom(sideInfo.getRowTypeInfo().getTypeAt(entry.getValue()).getClass()); + + //Type information for indicating event or processing time. However, it behaves like a regular SQL timestamp but is serialized as Long. 
+ if (obj instanceof Timestamp && isTimeIndicatorTypeInfo) { + obj = ((Timestamp) obj).getTime(); + } + + row.setField(entry.getKey(), obj); + } + + for (Map.Entry entry : sideInfo.getSideFieldNameIndex().entrySet()) { + if (cacheInfo == null) { + row.setField(entry.getKey(), null); + } else { + row.setField(entry.getKey(), cacheInfo.get(entry.getValue())); + } + } + + return row; + } + + @Override + protected void initCache() throws SQLException { + Map>> newCache = Maps.newConcurrentMap(); + cacheRef.set(newCache); + loadData(newCache); + } + + @Override + protected void reloadCache() { + //reload cacheRef and replace to old cacheRef + Map>> newCache = Maps.newConcurrentMap(); + try { + loadData(newCache); + } catch (SQLException e) { + LOG.error("", e); + } + + cacheRef.set(newCache); + LOG.info("----- rdb all cacheRef reload end:{}", Calendar.getInstance()); + } + + + @Override + public void flatMap(CRow value, Collector out) throws Exception { + List inputParams = Lists.newArrayList(); + for (Integer conValIndex : sideInfo.getEqualValIndex()) { + Object equalObj = value.row().getField(conValIndex); + if (equalObj == null) { + if (sideInfo.getJoinType() == JoinType.LEFT) { + Row row = fillData(value.row(), null); + out.collect(new CRow(row, value.change())); + } + return; + } + inputParams.add(equalObj); + } + + String key = buildKey(inputParams); + List> cacheList = cacheRef.get().get(key); + if (CollectionUtils.isEmpty(cacheList)) { + if (sideInfo.getJoinType() == JoinType.LEFT) { + Row row = fillData(value.row(), null); + out.collect(new CRow(row, value.change())); + } else { + return; + } + + return; + } + + for (Map one : cacheList) { + out.collect(new CRow(fillData(value.row(), one), value.change())); + } + } + + private String buildKey(List equalValList) { + StringBuilder sb = new StringBuilder(""); + for (Object equalVal : equalValList) { + sb.append(equalVal).append("_"); + } + + return sb.toString(); + } + + private String buildKey(Map val, List equalFieldList) { + StringBuilder sb = new StringBuilder(""); + for (String equalField : equalFieldList) { + sb.append(val.get(equalField)).append("_"); + } + + return sb.toString(); + } + + public abstract Connection getConn(String dbUrl, String userName, String password); + + + private void loadData(Map>> tmpCache) throws SQLException { + RdbSideTableInfo tableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); + Connection connection = null; + + try { + for (int i = 0; i < CONN_RETRY_NUM; i++) { + try { + connection = getConn(tableInfo.getUrl(), tableInfo.getUserName(), tableInfo.getPassword()); + break; + } catch (Exception e) { + if (i == CONN_RETRY_NUM - 1) { + throw new RuntimeException("", e); + } + try { + String connInfo = "url:" + tableInfo.getUrl() + ";userName:" + tableInfo.getUserName() + ",pwd:" + tableInfo.getPassword(); + LOG.warn("get conn fail, wait for 5 sec and try again, connInfo:" + connInfo); + Thread.sleep(5 * 1000); + } catch (InterruptedException e1) { + LOG.error("", e1); + } + } + + } + + //load data from table + String sql = sideInfo.getSqlCondition(); + Statement statement = connection.createStatement(); + statement.setFetchSize(getFetchSize()); + ResultSet resultSet = statement.executeQuery(sql); + String[] sideFieldNames = sideInfo.getSideSelectFields().split(","); + String[] fields = sideInfo.getSideTableInfo().getFieldTypes(); + while (resultSet.next()) { + Map oneRow = Maps.newHashMap(); + for (String fieldName : sideFieldNames) { + Object object = resultSet.getObject(fieldName.trim()); + int 
fieldIndex = sideInfo.getSideTableInfo().getFieldList().indexOf(fieldName.trim()); + object = SwitchUtil.getTarget(object, fields[fieldIndex]); + oneRow.put(fieldName.trim(), object); + } + + String cacheKey = buildKey(oneRow, sideInfo.getEqualFieldList()); + List> list = tmpCache.computeIfAbsent(cacheKey, key -> Lists.newArrayList()); + list.add(oneRow); + } + } catch (Exception e) { + LOG.error("", e); + } finally { + if (connection != null) { + connection.close(); + } + } + } + + public int getFetchSize() { + return 1000; + } + +} diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java new file mode 100644 index 000000000..373a59f6a --- /dev/null +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java @@ -0,0 +1,339 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.dtstack.flink.sql.sink.rdb.writer; + +import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; +import com.dtstack.flink.sql.sink.rdb.dialect.JDBCDialect; +import org.apache.flink.api.java.tuple.Tuple2; +import org.apache.flink.types.Row; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; + +import static com.dtstack.flink.sql.sink.rdb.JDBCTypeConvertUtils.setRecordToStatement; +import static org.apache.flink.util.Preconditions.checkNotNull; + +/** + * Upsert writer to deal with upsert, delete message.dd + * @author maqi + */ +public abstract class AbstractUpsertWriter implements JDBCWriter { + + private static final long serialVersionUID = 1L; + private static final Logger LOG = LoggerFactory.getLogger(AbstractUpsertWriter.class); + + + public static AbstractUpsertWriter create( + JDBCDialect dialect, + String schema, + String tableName, + String[] fieldNames, + int[] fieldTypes, + String[] keyFields, + String[] partitionFields, + boolean objectReuse, + boolean allReplace, + DtRichOutputFormat metricOutputFormat) { + + checkNotNull(keyFields); + + List nameList = Arrays.asList(fieldNames); + int[] pkFields = Arrays.stream(keyFields).mapToInt(nameList::indexOf).toArray(); + int[] pkTypes = fieldTypes == null ? 
null : + Arrays.stream(pkFields).map(f -> fieldTypes[f]).toArray(); + + String deleteSql = dialect.getDeleteStatement(schema, tableName, keyFields); + LOG.info("deleteSQL is :{}", deleteSql); + System.out.println("deleteSQL is :" + deleteSql); + + Optional upsertSql = dialect.getUpsertStatement(schema, tableName, fieldNames, keyFields, allReplace); + LOG.info("execute UpsertStatement: {}", upsertSql.orElse("use UsingInsertUpdateStatement")); + System.out.println("execute UpsertStatement: " + upsertSql.orElse("use UsingInsertUpdateStatement")); + + return upsertSql.map((Function) sql -> + new UpsertWriterUsingUpsertStatement( + fieldTypes, pkFields, pkTypes, objectReuse, deleteSql, sql, metricOutputFormat)) + .orElseGet(() -> + new UpsertWriterUsingInsertUpdateStatement( + fieldTypes, pkFields, pkTypes, objectReuse, deleteSql, + dialect.getRowExistsStatement(tableName, keyFields), + dialect.getInsertIntoStatement(schema, tableName, fieldNames, partitionFields), + dialect.getUpdateStatement(tableName, fieldNames, keyFields), + metricOutputFormat)); + } + + final int[] fieldTypes; + final int[] pkTypes; + private final int[] pkFields; + private final String deleteSql; + private final boolean objectReuse; + + private transient Map> keyToRows; + private transient PreparedStatement deleteStatement; + // only use metric + private transient DtRichOutputFormat metricOutputFormat; + + private AbstractUpsertWriter(int[] fieldTypes, int[] pkFields, int[] pkTypes, String deleteSql, boolean objectReuse, DtRichOutputFormat metricOutputFormat) { + this.fieldTypes = fieldTypes; + this.pkFields = pkFields; + this.pkTypes = pkTypes; + this.deleteSql = deleteSql; + this.objectReuse = objectReuse; + this.metricOutputFormat = metricOutputFormat; + } + + @Override + public void open(Connection connection) throws SQLException { + this.keyToRows = new HashMap<>(); + this.deleteStatement = connection.prepareStatement(deleteSql); + } + + @Override + public void addRecord(Tuple2 record) throws SQLException { + // we don't need perform a deep copy, because jdbc field are immutable object. + Tuple2 tuple2 = objectReuse ? 
new Tuple2<>(record.f0, Row.copy(record.f1)) : record; + // add records to buffer + keyToRows.put(getPrimaryKey(tuple2.f1), tuple2); + } + + @Override + public void executeBatch(Connection connection) throws SQLException { + try { + if (keyToRows.size() > 0) { + for (Map.Entry> entry : keyToRows.entrySet()) { + Row pk = entry.getKey(); + Tuple2 tuple = entry.getValue(); + if (tuple.f0) { + processOneRowInBatch(pk, tuple.f1); + } else { + setRecordToStatement(deleteStatement, pkTypes, pk); + deleteStatement.addBatch(); + } + } + internalExecuteBatch(); + deleteStatement.executeBatch(); + connection.commit(); + keyToRows.clear(); + } + } catch (Exception e) { + // 清理批处理中的正确字段,防止重复写入 + connection.rollback(); + connection.commit(); + cleanBatchWhenError(); + executeUpdate(connection); + } + } + + @Override + public void executeUpdate(Connection connection) throws SQLException { + if (keyToRows.size() > 0) { + for (Map.Entry> entry : keyToRows.entrySet()) { + try { + Row pk = entry.getKey(); + Tuple2 tuple = entry.getValue(); + if (tuple.f0) { + processOneRowInBatch(pk, tuple.f1); + internalExecuteBatch(); + } else { + setRecordToStatement(deleteStatement, pkTypes, pk); + deleteStatement.executeUpdate(); + } + connection.commit(); + } catch (Exception e) { + System.out.println(e.getCause()); + // deal pg error: current transaction is aborted, commands ignored until end of transaction block + connection.rollback(); + connection.commit(); + if (metricOutputFormat.outDirtyRecords.getCount() % DIRTYDATA_PRINT_FREQUENTY == 0 || LOG.isDebugEnabled()) { + LOG.error("record insert failed ,this row is {}", entry.getValue()); + LOG.error("", e); + } + metricOutputFormat.outDirtyRecords.inc(); + } + } + keyToRows.clear(); + } + } + + abstract void processOneRowInBatch(Row pk, Row row) throws SQLException; + + abstract void internalExecuteBatch() throws SQLException; + + @Override + public void close() throws SQLException { + if (deleteStatement != null) { + deleteStatement.close(); + deleteStatement = null; + } + } + + private Row getPrimaryKey(Row row) { + Row pks = new Row(pkFields.length); + for (int i = 0; i < pkFields.length; i++) { + pks.setField(i, row.getField(pkFields[i])); + } + return pks; + } + + // ---------------------------------------------------------------------------------------- + + private static final class UpsertWriterUsingUpsertStatement extends AbstractUpsertWriter { + + private static final long serialVersionUID = 1L; + private final String upsertSql; + + private transient PreparedStatement upsertStatement; + + private UpsertWriterUsingUpsertStatement( + int[] fieldTypes, + int[] pkFields, + int[] pkTypes, + boolean objectReuse, + String deleteSql, + String upsertSql, + DtRichOutputFormat metricOutputFormat) { + super(fieldTypes, pkFields, pkTypes, deleteSql, objectReuse, metricOutputFormat); + this.upsertSql = upsertSql; + } + + @Override + public void open(Connection connection) throws SQLException { + super.open(connection); + upsertStatement = connection.prepareStatement(upsertSql); + } + + @Override + void processOneRowInBatch(Row pk, Row row) throws SQLException { + setRecordToStatement(upsertStatement, fieldTypes, row); + upsertStatement.addBatch(); + } + + @Override + public void cleanBatchWhenError() throws SQLException { + upsertStatement.clearBatch(); + upsertStatement.clearParameters(); + } + + @Override + void internalExecuteBatch() throws SQLException { + upsertStatement.executeBatch(); + } + + @Override + public void close() throws SQLException { + super.close(); + 
if (upsertStatement != null) { + upsertStatement.close(); + upsertStatement = null; + } + } + } + + private static final class UpsertWriterUsingInsertUpdateStatement extends AbstractUpsertWriter { + + private static final long serialVersionUID = 1L; + private final String existSql; + private final String insertSql; + private final String updateSql; + + private transient PreparedStatement existStatement; + private transient PreparedStatement insertStatement; + private transient PreparedStatement updateStatement; + + private UpsertWriterUsingInsertUpdateStatement( + int[] fieldTypes, + int[] pkFields, + int[] pkTypes, + boolean objectReuse, + String deleteSql, + String existSql, + String insertSql, + String updateSql, + DtRichOutputFormat metricOutputFormat) { + super(fieldTypes, pkFields, pkTypes, deleteSql, objectReuse, metricOutputFormat); + this.existSql = existSql; + this.insertSql = insertSql; + this.updateSql = updateSql; + } + + @Override + public void open(Connection connection) throws SQLException { + super.open(connection); + existStatement = connection.prepareStatement(existSql); + insertStatement = connection.prepareStatement(insertSql); + updateStatement = connection.prepareStatement(updateSql); + } + + @Override + void processOneRowInBatch(Row pk, Row row) throws SQLException { + setRecordToStatement(existStatement, pkTypes, pk); + ResultSet resultSet = existStatement.executeQuery(); + boolean exist = resultSet.next(); + resultSet.close(); + if (exist) { + // do update + setRecordToStatement(updateStatement, fieldTypes, row); + updateStatement.addBatch(); + } else { + // do insert + setRecordToStatement(insertStatement, fieldTypes, row); + insertStatement.addBatch(); + } + } + + @Override + public void cleanBatchWhenError() throws SQLException { + updateStatement.clearBatch(); + insertStatement.clearBatch(); + } + + @Override + void internalExecuteBatch() throws SQLException { + updateStatement.executeBatch(); + insertStatement.executeBatch(); + } + + @Override + public void close() throws SQLException { + super.close(); + if (existStatement != null) { + existStatement.close(); + existStatement = null; + } + if (insertStatement != null) { + insertStatement.close(); + insertStatement = null; + } + if (updateStatement != null) { + updateStatement.close(); + updateStatement = null; + } + } + } +} From aac3a273f52d39318e943c2a4616d4e5a97394e8 Mon Sep 17 00:00:00 2001 From: maqi Date: Tue, 25 Feb 2020 16:13:25 +0800 Subject: [PATCH 05/47] redis scan --- .../flink/sql/side/redis/RedisAllReqRow.java | 7 ++-- .../sql/side/redis/RedisAllSideInfo.java | 4 ++- .../sql/side/redis/RedisAsyncReqRow.java | 5 ++- .../sql/side/redis/RedisAsyncSideInfo.java | 4 ++- .../sql/side/redis/table/RedisSideParser.java | 3 ++ .../side/redis/table/RedisSideTableInfo.java | 4 ++- .../sql/sink/redis/RedisOutputFormat.java | 35 +++++++++++++------ .../flink/sql/sink/redis/RedisSink.java | 4 ++- .../sql/sink/redis/table/RedisSinkParser.java | 5 +-- .../sql/sink/redis/table/RedisTableInfo.java | 4 ++- .../CustomerSocketTextStreamFunction.java | 2 +- .../table/ServersocketSourceTableInfo.java | 2 +- 12 files changed, 56 insertions(+), 23 deletions(-) diff --git a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java index 59997f5cb..a215c9d75 100644 --- a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java +++ 
b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java @@ -39,7 +39,9 @@ import java.util.*; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; - +/** + * @author yanxi + */ public class RedisAllReqRow extends AllReqRow{ private static final long serialVersionUID = 7578879189085344807L; @@ -259,7 +261,8 @@ private JedisCommands getJedis(RedisSideTableInfo tableInfo) { break; //集群 case 3: - jedis = new JedisCluster(addresses, timeout, timeout,1, poolConfig); + jedis = new JedisCluster(addresses, timeout, timeout, 1, poolConfig); + default: } return jedis; diff --git a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllSideInfo.java b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllSideInfo.java index 81e7b1f4f..fc24bc4ef 100644 --- a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllSideInfo.java +++ b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllSideInfo.java @@ -28,7 +28,9 @@ import com.google.common.collect.Lists; import java.util.List; - +/** + * @author yanxi + */ public class RedisAllSideInfo extends SideInfo { private static final long serialVersionUID = 1998703966487857613L; diff --git a/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java b/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java index d20875e01..ee951a928 100644 --- a/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java +++ b/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java @@ -43,7 +43,9 @@ import java.util.List; import java.util.Map; import java.util.function.Consumer; - +/** + * @author yanxi + */ public class RedisAsyncReqRow extends AsyncReqRow { private static final long serialVersionUID = -2079908694523987738L; @@ -108,6 +110,7 @@ private void buildRedisClient(RedisSideTableInfo tableInfo){ clusterClient = RedisClusterClient.create(clusterUri.toString()); clusterConnection = clusterClient.connect(); async = clusterConnection.async(); + default: } } diff --git a/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncSideInfo.java b/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncSideInfo.java index 9bdefe8c5..2736fc8a7 100644 --- a/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncSideInfo.java +++ b/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncSideInfo.java @@ -28,7 +28,9 @@ import com.google.common.collect.Lists; import java.util.List; - +/** + * @author yanxi + */ public class RedisAsyncSideInfo extends SideInfo { private static final long serialVersionUID = -4851348392924455039L; diff --git a/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideParser.java b/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideParser.java index f8a83b868..2d0fb77d4 100644 --- a/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideParser.java +++ b/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideParser.java @@ -24,6 +24,9 @@ 
import java.util.Map; +/** + * @author yanxi + */ public class RedisSideParser extends AbsSideTableParser { @Override diff --git a/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideTableInfo.java b/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideTableInfo.java index 88cfcb8a2..ae6cdd017 100644 --- a/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideTableInfo.java +++ b/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideTableInfo.java @@ -20,7 +20,9 @@ import com.dtstack.flink.sql.side.SideTableInfo; import com.google.common.base.Preconditions; - +/** + * @author yanxi + */ public class RedisSideTableInfo extends SideTableInfo { private static final long serialVersionUID = -1L; diff --git a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java index 62d14cabd..bbf49dff7 100644 --- a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java +++ b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java @@ -26,11 +26,23 @@ import org.apache.flink.types.Row; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import redis.clients.jedis.*; +import redis.clients.jedis.HostAndPort; +import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.JedisCommands; +import redis.clients.jedis.JedisPool; +import redis.clients.jedis.JedisSentinelPool; + import java.io.Closeable; import java.io.IOException; -import java.util.*; - +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +/** + * @author yanxi + */ public class RedisOutputFormat extends DtRichOutputFormat { private static final Logger LOG = LoggerFactory.getLogger(RedisOutputFormat.class); @@ -129,7 +141,8 @@ private void establishConnection() { break; //集群 case 3: - jedis = new JedisCluster(addresses, timeout, timeout,10, password, poolConfig); + jedis = new JedisCluster(addresses, timeout, timeout, 10, password, poolConfig); + default: } } @@ -145,10 +158,10 @@ public void writeRecord(Tuple2 record) throws IOException { return; } - HashMap map = new HashMap<>(); - for (String primaryKey : primaryKeys){ - for (int i=0; i map = new HashMap<>(8); + for (String primaryKey : primaryKeys) { + for (int i = 0; i < fieldNames.length; i++) { + if (fieldNames[i].equals(primaryKey)) { map.put(primaryKey, i); } } @@ -156,10 +169,10 @@ public void writeRecord(Tuple2 record) throws IOException { List kvList = new LinkedList<>(); for (String primaryKey : primaryKeys){ - StringBuilder primaryKV = new StringBuilder(); + StringBuilder primaryKv = new StringBuilder(); int index = map.get(primaryKey).intValue(); - primaryKV.append(primaryKey).append(":").append(row.getField(index)); - kvList.add(primaryKV.toString()); + primaryKv.append(primaryKey).append(":").append(row.getField(index)); + kvList.add(primaryKv.toString()); } String perKey = String.join(":", kvList); diff --git a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisSink.java b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisSink.java index d2e28c01f..007c7fd3b 100644 --- a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisSink.java +++ 
b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisSink.java @@ -33,7 +33,9 @@ import org.apache.flink.types.Row; import java.util.List; - +/** + * @author yanxi + */ public class RedisSink implements RetractStreamTableSink, IStreamSinkGener { protected String[] fieldNames; diff --git a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisSinkParser.java b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisSinkParser.java index 49861bb9a..201abab2c 100644 --- a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisSinkParser.java +++ b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisSinkParser.java @@ -24,11 +24,12 @@ import com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; - +/** + * @author yanxi + */ public class RedisSinkParser extends AbsTableParser { @Override public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { diff --git a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisTableInfo.java b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisTableInfo.java index 60d6dd12f..5c9940252 100644 --- a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisTableInfo.java +++ b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisTableInfo.java @@ -20,7 +20,9 @@ import com.dtstack.flink.sql.table.TargetTableInfo; import com.google.common.base.Preconditions; - +/** + * @author yanxi + */ public class RedisTableInfo extends TargetTableInfo { private static final String CURR_TYPE = "redis"; diff --git a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/CustomerSocketTextStreamFunction.java b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/CustomerSocketTextStreamFunction.java index e46e0a76f..2f508b3e5 100644 --- a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/CustomerSocketTextStreamFunction.java +++ b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/CustomerSocketTextStreamFunction.java @@ -94,7 +94,7 @@ public void run(SourceContext ctx) throws Exception { while (buffer.length() >= delimiter.length() && (delimPos = buffer.indexOf(delimiter)) != -1) { String record = buffer.substring(0, delimPos); // truncate trailing carriage return - if (delimiter.equals("\n") && record.endsWith("\r")) { + if ("\n".equals(delimiter) && "\r".endsWith(record)) { record = record.substring(0, record.length() - 1); } try { diff --git a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceTableInfo.java b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceTableInfo.java index 796728eb3..02f6e9f30 100644 --- a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceTableInfo.java +++ b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceTableInfo.java @@ -28,7 +28,7 @@ * @author maqi */ public class ServersocketSourceTableInfo extends SourceTableInfo { - //version + private static final String CURR_TYPE = "serversocket"; public static final String 
HOSTNAME_KEY = "host"; From 42db63ec3d88d2234b2415846619b40e703dd07f Mon Sep 17 00:00:00 2001 From: maqi Date: Tue, 25 Feb 2020 16:20:24 +0800 Subject: [PATCH 06/47] merge 1.8_dev --- core/src/main/java/com/dtstack/flink/sql/Main.java | 8 -------- 1 file changed, 8 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/Main.java b/core/src/main/java/com/dtstack/flink/sql/Main.java index 4ff296f1f..b912128f9 100644 --- a/core/src/main/java/com/dtstack/flink/sql/Main.java +++ b/core/src/main/java/com/dtstack/flink/sql/Main.java @@ -27,8 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.LoggerContext; /** * Date: 2018/6/26 @@ -45,10 +43,4 @@ public static void main(String[] args) throws Exception { env.execute(paramsInfo.getName()); LOG.info("program {} execution success", paramsInfo.getName()); } - private static void setLogLevel(String level){ - LoggerContext loggerContext= (LoggerContext) LoggerFactory.getILoggerFactory(); - //设置全局日志级别 - ch.qos.logback.classic.Logger logger = loggerContext.getLogger("root"); - logger.setLevel(Level.toLevel(level, Level.INFO)); - } } From 87c26d71fecc1fd5aeb8d1b8c66d3077c8bcb45c Mon Sep 17 00:00:00 2001 From: zoudaokoulife Date: Sun, 1 Mar 2020 14:44:42 +0800 Subject: [PATCH 07/47] =?UTF-8?q?=E5=90=88=E5=B9=B6=E5=86=B2=E7=AA=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../com/dtstack/flink/sql/side/AsyncReqRow.java | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java b/core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java index 066708d6c..cdd4a1610 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java @@ -122,13 +122,14 @@ protected void dealCacheData(String key, CacheObj missKeyObj) { @Override public void timeout(CRow input, ResultFuture resultFuture) throws Exception { - StreamRecordQueueEntry future = (StreamRecordQueueEntry)resultFuture; - try { - if (null == future.get()) { - resultFuture.completeExceptionally(new TimeoutException("Async function call has timed out.")); - } - } catch (Exception e) { - resultFuture.completeExceptionally(new Exception(e)); + + if(timeOutNum % TIMEOUT_LOG_FLUSH_NUM == 0){ + LOG.info("Async function call has timed out. 
input:{}, timeOutNum:{}",input.toString(), timeOutNum); + } + + timeOutNum ++; + if(timeOutNum > sideInfo.getSideTableInfo().getAsyncTimeoutNumLimit()){ + resultFuture.complete(null); } } From 1248591145ceffe9f59c4bd6908677283798d402 Mon Sep 17 00:00:00 2001 From: dapeng Date: Sun, 1 Mar 2020 14:07:32 +0800 Subject: [PATCH 08/47] fix completeExceptionally desc --- core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java b/core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java index cdd4a1610..cd811e48f 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java @@ -129,7 +129,7 @@ public void timeout(CRow input, ResultFuture resultFuture) throws Exceptio timeOutNum ++; if(timeOutNum > sideInfo.getSideTableInfo().getAsyncTimeoutNumLimit()){ - resultFuture.complete(null); + resultFuture.completeExceptionally(new Exception("Async function call timedoutNum beyond limit.")); } } From d445fcac42fbc1b03aae9d1562ea3dd64b95fe21 Mon Sep 17 00:00:00 2001 From: dapeng Date: Sun, 1 Mar 2020 20:47:05 +0800 Subject: [PATCH 09/47] =?UTF-8?q?=E5=8F=82=E6=95=B0=E8=B5=8B=E5=80=BC?= =?UTF-8?q?=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java | 2 +- .../java/com/dtstack/flink/sql/table/AbsSideTableParser.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java b/core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java index 8c78578fb..c82489c94 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java @@ -65,7 +65,7 @@ public abstract class SideTableInfo extends TableInfo implements Serializable { private int asyncTimeout=10000; - private int asyncTimeoutNumLimit = Integer.MAX_VALUE; + private int asyncTimeoutNumLimit = 0; private boolean partitionedJoin = false; diff --git a/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java index 4e584cbe8..bf0b8a7a4 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java @@ -111,7 +111,7 @@ protected void parseCacheProp(SideTableInfo sideTableInfo, Map p if(props.containsKey(SideTableInfo.ASYNC_TIMEOUT_NUM_KEY.toLowerCase())){ Integer asyncTimeoutNum = MathUtil.getIntegerVal(props.get(SideTableInfo.ASYNC_TIMEOUT_NUM_KEY.toLowerCase())); if (asyncTimeoutNum > 0){ - sideTableInfo.setAsyncTimeout(asyncTimeoutNum); + sideTableInfo.setAsyncTimeoutNumLimit(asyncTimeoutNum); } } } From c47553672b5f5158315b62c96fca4947dbc34856 Mon Sep 17 00:00:00 2001 From: maqi Date: Mon, 2 Mar 2020 10:22:24 +0800 Subject: [PATCH 10/47] remove log level and modify objectmapper class --- core/pom.xml | 5 ----- .../src/main/java/com/dtstack/flink/sql/Main.java | 9 --------- .../com/dtstack/flink/sql/exec/ApiResult.java | 2 +- .../flink/sql/exec/ExecuteProcessHelper.java | 11 ----------- .../com/dtstack/flink/sql/exec/ParamsInfo.java | 15 ++------------- 5 files changed, 3 insertions(+), 39 deletions(-) diff --git a/core/pom.xml b/core/pom.xml index 4b5a3e67b..677f9d5ee 100644 
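Patches 07-09 above rework how async dimension-table lookups handle timeouts: timeout() now counts timed-out requests instead of failing each one, patch 08 fails the future once the count passes the limit, and patch 09 fixes the default value (0) and the property assignment for asyncTimeoutNumLimit. The following is a minimal, self-contained sketch of that flow; timeOutNum, TIMEOUT_LOG_FLUSH_NUM and asyncTimeoutNumLimit come from the diffs, while the class name, the onTimeout() signature and failCallback are illustrative only, not the project's API.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import java.util.function.Consumer;

    // Simplified illustration of the timeout flow from patches 07-09; the field names
    // mirror the diffs, but this class, onTimeout() and failCallback are illustrative only.
    public class AsyncTimeoutSketch {
        private static final Logger LOG = LoggerFactory.getLogger(AsyncTimeoutSketch.class);
        private static final long TIMEOUT_LOG_FLUSH_NUM = 10; // arbitrary value for the sketch

        // 0 by default (patch 09); only overridden when the table property asyncTimeoutNum > 0
        private final long asyncTimeoutNumLimit;
        private long timeOutNum = 0;

        public AsyncTimeoutSketch(long asyncTimeoutNumLimit) {
            this.asyncTimeoutNumLimit = asyncTimeoutNumLimit;
        }

        // Invoked for each lookup request that exceeds the async timeout.
        public void onTimeout(Object input, Consumer<Exception> failCallback) {
            if (timeOutNum % TIMEOUT_LOG_FLUSH_NUM == 0) {
                LOG.info("Async function call has timed out. input:{}, timeOutNum:{}", input, timeOutNum);
            }
            timeOutNum++;
            if (timeOutNum > asyncTimeoutNumLimit) {
                // patch 08: fail the future instead of completing it with null
                failCallback.accept(new Exception("Async function call timedoutNum beyond limit."));
            }
        }
    }

As these hunks read, the default limit of 0 means the very first timeout already fails the request, so jobs that want to tolerate occasional lookup timeouts have to set asyncTimeoutNum explicitly on the side table.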
--- a/core/pom.xml +++ b/core/pom.xml @@ -121,11 +121,6 @@ junit 4.12 - - ch.qos.logback - logback-classic - 1.1.7 - diff --git a/core/src/main/java/com/dtstack/flink/sql/Main.java b/core/src/main/java/com/dtstack/flink/sql/Main.java index 4ff296f1f..5d7528869 100644 --- a/core/src/main/java/com/dtstack/flink/sql/Main.java +++ b/core/src/main/java/com/dtstack/flink/sql/Main.java @@ -27,9 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.LoggerContext; - /** * Date: 2018/6/26 * Company: www.dtstack.com @@ -45,10 +42,4 @@ public static void main(String[] args) throws Exception { env.execute(paramsInfo.getName()); LOG.info("program {} execution success", paramsInfo.getName()); } - private static void setLogLevel(String level){ - LoggerContext loggerContext= (LoggerContext) LoggerFactory.getILoggerFactory(); - //设置全局日志级别 - ch.qos.logback.classic.Logger logger = loggerContext.getLogger("root"); - logger.setLevel(Level.toLevel(level, Level.INFO)); - } } diff --git a/core/src/main/java/com/dtstack/flink/sql/exec/ApiResult.java b/core/src/main/java/com/dtstack/flink/sql/exec/ApiResult.java index d6ca857c3..d67a39a40 100644 --- a/core/src/main/java/com/dtstack/flink/sql/exec/ApiResult.java +++ b/core/src/main/java/com/dtstack/flink/sql/exec/ApiResult.java @@ -1,6 +1,6 @@ package com.dtstack.flink.sql.exec; -import org.codehaus.jackson.map.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java index 2c6d74df0..d2c2926ef 100644 --- a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java +++ b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java @@ -18,8 +18,6 @@ package com.dtstack.flink.sql.exec; -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.LoggerContext; import com.dtstack.flink.sql.classloader.ClassLoaderManager; import com.dtstack.flink.sql.config.CalciteConfig; import com.dtstack.flink.sql.enums.ClusterMode; @@ -128,7 +126,6 @@ public static ParamsInfo parseParams(String[] args) throws Exception { .setDeployMode(deployMode) .setConfProp(confProperties) .setJarUrlList(jarURList) - .setLogLevel(logLevel) .build(); } @@ -154,8 +151,6 @@ public static StreamExecutionEnvironment getStreamExecution(ParamsInfo paramsInf StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env); StreamQueryConfig streamQueryConfig = StreamEnvConfigManager.getStreamQueryConfig(tableEnv, paramsInfo.getConfProp()); - setLogLevel(paramsInfo.getLogLevel()); - SqlParser.setLocalSqlPluginRoot(paramsInfo.getLocalSqlPluginPath()); SqlTree sqlTree = SqlParser.parseSql(paramsInfo.getSql()); @@ -348,11 +343,5 @@ public static StreamExecutionEnvironment getStreamExeEnv(Properties confProperti return env; } - private static void setLogLevel(String level){ - LoggerContext loggerContext= (LoggerContext) LoggerFactory.getILoggerFactory(); - //设置全局日志级别 - ch.qos.logback.classic.Logger logger = loggerContext.getLogger("root"); - logger.setLevel(Level.toLevel(level, Level.INFO)); - } } \ No newline at end of file diff --git a/core/src/main/java/com/dtstack/flink/sql/exec/ParamsInfo.java b/core/src/main/java/com/dtstack/flink/sql/exec/ParamsInfo.java index 59249827c..27cc7702d 100644 --- a/core/src/main/java/com/dtstack/flink/sql/exec/ParamsInfo.java +++ 
b/core/src/main/java/com/dtstack/flink/sql/exec/ParamsInfo.java @@ -38,11 +38,10 @@ public class ParamsInfo { private String remoteSqlPluginPath; private String pluginLoadMode; private String deployMode; - private String logLevel; private Properties confProp; public ParamsInfo(String sql, String name, List jarUrlList, String localSqlPluginPath, - String remoteSqlPluginPath, String pluginLoadMode, String deployMode, String logLevel, Properties confProp) { + String remoteSqlPluginPath, String pluginLoadMode, String deployMode, Properties confProp) { this.sql = sql; this.name = name; this.jarUrlList = jarUrlList; @@ -50,7 +49,6 @@ public ParamsInfo(String sql, String name, List jarUrlList, String localSql this.remoteSqlPluginPath = remoteSqlPluginPath; this.pluginLoadMode = pluginLoadMode; this.deployMode = deployMode; - this.logLevel = logLevel; this.confProp = confProp; } @@ -86,10 +84,6 @@ public Properties getConfProp() { return confProp; } - public String getLogLevel() { - return logLevel; - } - @Override public String toString() { return "ParamsInfo{" + @@ -100,7 +94,6 @@ public String toString() { ", remoteSqlPluginPath='" + remoteSqlPluginPath + '\'' + ", pluginLoadMode='" + pluginLoadMode + '\'' + ", deployMode='" + deployMode + '\'' + - ", logLevel='" + logLevel + '\'' + ", confProp=" + confProp + '}'; } @@ -160,10 +153,6 @@ public ParamsInfo.Builder setDeployMode(String deployMode) { return this; } - public ParamsInfo.Builder setLogLevel(String logLevel) { - this.logLevel = logLevel; - return this; - } public ParamsInfo.Builder setConfProp(Properties confProp) { this.confProp = confProp; @@ -172,7 +161,7 @@ public ParamsInfo.Builder setConfProp(Properties confProp) { public ParamsInfo build() { return new ParamsInfo(sql, name, jarUrlList, localSqlPluginPath, - remoteSqlPluginPath, pluginLoadMode, deployMode, logLevel, confProp); + remoteSqlPluginPath, pluginLoadMode, deployMode, confProp); } } } From 586c74a117b02aed822dbe311ca79419856137f3 Mon Sep 17 00:00:00 2001 From: maqi Date: Wed, 4 Mar 2020 10:22:35 +0800 Subject: [PATCH 11/47] es id number check --- ...asticsearch6Side => elasticsearch6Side.md} | 0 .../table/ElasticsearchTableInfo.java | 8 +++ .../sink/elasticsearch/CustomerSinkFunc.java | 38 ++++++------ .../sink/elasticsearch/ElasticsearchSink.java | 62 +++++++------------ .../table/ElasticsearchTableInfo.java | 8 +++ pom.xml | 2 +- 6 files changed, 56 insertions(+), 62 deletions(-) rename docs/{elasticsearch6Side => elasticsearch6Side.md} (100%) diff --git a/docs/elasticsearch6Side b/docs/elasticsearch6Side.md similarity index 100% rename from docs/elasticsearch6Side rename to docs/elasticsearch6Side.md diff --git a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java index ac1d712ce..b0439978e 100644 --- a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java +++ b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java @@ -23,6 +23,10 @@ import com.dtstack.flink.sql.table.TargetTableInfo; import com.google.common.base.Preconditions; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.math.NumberUtils; + +import java.util.Arrays; /** * @date 2018/09/12 @@ -131,6 +135,10 @@ public boolean check() { 
Preconditions.checkNotNull(id, "elasticsearch type of id is required"); Preconditions.checkNotNull(clusterName, "elasticsearch type of clusterName is required"); + Arrays.stream(StringUtils.split(id, ",")).forEach(number -> { + Preconditions.checkArgument(NumberUtils.isNumber(number), "id must be a numeric type"); + }); + if (isAuthMesh()) { Preconditions.checkNotNull(userName, "elasticsearch type of userName is required"); Preconditions.checkNotNull(password, "elasticsearch type of password is required"); diff --git a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java index d6d207c9e..b1c8a5e97 100644 --- a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java +++ b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java @@ -31,9 +31,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; /** * @author yinxi @@ -42,6 +42,8 @@ public class CustomerSinkFunc implements ElasticsearchSinkFunction { private final Logger logger = LoggerFactory.getLogger(CustomerSinkFunc.class); + /** 用作ID的属性值连接符号 */ + private static final String ID_VALUE_SPLIT = "_"; private String index; @@ -57,10 +59,7 @@ public class CustomerSinkFunc implements ElasticsearchSinkFunction { private transient Counter outDirtyRecords; - /** 默认分隔符为'_' */ - private char sp = '_'; - - public CustomerSinkFunc(String index, String type, List fieldNames, List fieldTypes, List idFieldIndexes){ + public CustomerSinkFunc(String index, String type, List fieldNames, List fieldTypes, List idFieldIndexes) { this.index = index; this.type = type; this.fieldNames = fieldNames; @@ -96,31 +95,30 @@ public void setOutDirtyRecords(Counter outDirtyRecords) { } private IndexRequest createIndexRequest(Row element) { + // index start at 1, + String idFieldStr = idFieldIndexList.stream() + .filter(index -> index > 0 && index <= element.getArity()) + .map(index -> element.getField(index - 1).toString()) + .collect(Collectors.joining(ID_VALUE_SPLIT)); - List idFieldList = new ArrayList<>(); - for(int index : idFieldIndexList){ - if(index >= element.getArity()){ - continue; - } - - idFieldList.add(element.getField(index).toString()); - } - - Map dataMap = Es6Util.rowToJsonMap(element,fieldNames,fieldTypes); + Map dataMap = Es6Util.rowToJsonMap(element, fieldNames, fieldTypes); int length = Math.min(element.getArity(), fieldNames.size()); - for(int i=0; i, IStreamSinkGener { - private final Logger logger = LoggerFactory.getLogger(ElasticsearchSink.class); + private final int ES_DEFAULT_PORT = 9200; + private final String ES_DEFAULT_SCHEMA = "http"; private String clusterName; @@ -103,32 +101,20 @@ public TypeInformation[] getFieldTypes() { private RichSinkFunction createEsSinkFunction() { - - Map userConfig = Maps.newHashMap(); userConfig.put("cluster.name", clusterName); // This instructs the sink to emit after every element, otherwise they would be buffered userConfig.put(org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "" + bulkFlushMaxActions); - List transports = new ArrayList<>(); - - for (String address : esAddressList) { - String[] infoArray = address.split(":"); - int port = 9200; - String host 
= infoArray[0]; - if (infoArray.length > 1) { - port = Integer.valueOf(infoArray[1].trim()); - } - - try { - transports.add(new HttpHost(host.trim(), port, "http")); - } catch (Exception e) { - logger.error("", e); - throw new RuntimeException(e); - } - } - CustomerSinkFunc customerSinkFunc = new CustomerSinkFunc(index, type, Arrays.asList(fieldNames), Arrays.asList(columnTypes), idIndexList); + List transports = esAddressList.stream() + .map(address -> address.split(":")) + .map(addressArray -> { + String host = addressArray[0].trim(); + int port = addressArray.length > 1 ? Integer.valueOf(addressArray[1].trim()) : ES_DEFAULT_PORT; + return new HttpHost(host.trim(), port, ES_DEFAULT_SCHEMA); + }).collect(Collectors.toList()); + CustomerSinkFunc customerSinkFunc = new CustomerSinkFunc(index, type, Arrays.asList(fieldNames), Arrays.asList(columnTypes), idIndexList); return new MetricElasticsearch6Sink(userConfig, transports, customerSinkFunc, esTableInfo); } @@ -151,23 +137,17 @@ public void setBulkFlushMaxActions(int bulkFlushMaxActions) { @Override public ElasticsearchSink genStreamSink(TargetTableInfo targetTableInfo) { - ElasticsearchTableInfo elasticsearchTableInfo = (ElasticsearchTableInfo) targetTableInfo; - esTableInfo = elasticsearchTableInfo; - clusterName = elasticsearchTableInfo.getClusterName(); - String address = elasticsearchTableInfo.getAddress(); - String[] addr = address.split(","); - esAddressList = Arrays.asList(addr); - index = elasticsearchTableInfo.getIndex(); - type = elasticsearchTableInfo.getEsType(); - String id = elasticsearchTableInfo.getId(); - String[] idField = StringUtils.split(id, ","); - idIndexList = new ArrayList<>(); - - for (int i = 0; i < idField.length; ++i) { - idIndexList.add(Integer.valueOf(idField[i])); + esTableInfo = (ElasticsearchTableInfo) targetTableInfo; + clusterName = esTableInfo.getClusterName(); + index = esTableInfo.getIndex(); + type = esTableInfo.getEsType(); + columnTypes = esTableInfo.getFieldTypes(); + esAddressList = Arrays.asList(esTableInfo.getAddress().split(",")); + + String id = esTableInfo.getId(); + if (!StringUtils.isEmpty(id)) { + idIndexList = Arrays.stream(StringUtils.split(id, ",")).map(Integer::valueOf).collect(Collectors.toList()); } - - columnTypes = elasticsearchTableInfo.getFieldTypes(); return this; } } diff --git a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java index b0156b660..5709d609c 100644 --- a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java +++ b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java @@ -20,6 +20,10 @@ import com.dtstack.flink.sql.table.TargetTableInfo; import com.google.common.base.Preconditions; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.math.NumberUtils; + +import java.util.Arrays; /** * @author yinxi @@ -127,6 +131,10 @@ public boolean check() { Preconditions.checkNotNull(id, "elasticsearch6 type of id is required"); Preconditions.checkNotNull(clusterName, "elasticsearch6 type of clusterName is required"); + Arrays.stream(StringUtils.split(id, ",")).forEach(number ->{ + Preconditions.checkArgument(NumberUtils.isNumber(number),"id must be a numeric type"); + }); + if (isAuthMesh()) { 
Preconditions.checkNotNull(userName, "elasticsearch6 type of userName is required"); Preconditions.checkNotNull(password, "elasticsearch6 type of password is required"); diff --git a/pom.xml b/pom.xml index 2794f9004..5493645e9 100644 --- a/pom.xml +++ b/pom.xml @@ -18,6 +18,7 @@ mysql hbase elasticsearch5 + elasticsearch6 mongo redis5 launcher @@ -33,7 +34,6 @@ impala db2 polardb - elasticsearch6 From 284d52d4ecb5dbcf51a1b379731f9b664586d183 Mon Sep 17 00:00:00 2001 From: maqi Date: Wed, 4 Mar 2020 13:23:54 +0800 Subject: [PATCH 12/47] id allow as null --- .../sink/elasticsearch/CustomerSinkFunc.java | 27 +++++++++++-------- .../table/ElasticsearchTableInfo.java | 9 ++++--- .../sink/elasticsearch/ElasticsearchSink.java | 10 +------ .../table/ElasticsearchTableInfo.java | 9 ++++--- 4 files changed, 27 insertions(+), 28 deletions(-) diff --git a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java index a95d477de..1f3efb8d7 100644 --- a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java +++ b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java @@ -35,6 +35,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; /** * Reason: @@ -47,6 +48,8 @@ public class CustomerSinkFunc implements ElasticsearchSinkFunction { private final Logger logger = LoggerFactory.getLogger(CustomerSinkFunc.class); + private static final String ID_VALUE_SPLIT = "_"; + private String index; private String type; @@ -93,15 +96,11 @@ public void setOutRecords(Counter outRecords) { } private IndexRequest createIndexRequest(Row element) { - - List idFieldList = new ArrayList<>(); - for(int index : idFieldIndexList){ - if(index >= element.getArity()){ - continue; - } - - idFieldList.add(element.getField(index).toString()); - } + // index start at 1, + String idFieldStr = idFieldIndexList.stream() + .filter(index -> index > 0 && index <= element.getArity()) + .map(index -> element.getField(index - 1).toString()) + .collect(Collectors.joining(ID_VALUE_SPLIT)); Map dataMap = EsUtil.rowToJsonMap(element,fieldNames,fieldTypes); int length = Math.min(element.getArity(), fieldNames.size()); @@ -109,11 +108,17 @@ private IndexRequest createIndexRequest(Row element) { dataMap.put(fieldNames.get(i), element.getField(i)); } - String id = StringUtils.join(idFieldList, sp); + if (StringUtils.isEmpty(idFieldStr)) { + return Requests.indexRequest() + .index(index) + .type(type) + .source(dataMap); + } + return Requests.indexRequest() .index(index) .type(type) - .id(id) + .id(idFieldStr) .source(dataMap); } } diff --git a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java index b0439978e..9681742c2 100644 --- a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java +++ b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java @@ -132,12 +132,13 @@ public boolean check() { Preconditions.checkNotNull(address, "elasticsearch type of address is required"); 
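Patches 11 and 12 together redefine how the Elasticsearch sinks derive a document id: the id table property is an optional comma-separated list of 1-based field positions, every part must be numeric, and when the property is absent the IndexRequest is built without an explicit id so Elasticsearch assigns one. Below is a minimal, self-contained sketch of that logic; ID_VALUE_SPLIT ("_") and the 1-based filtering mirror CustomerSinkFunc, while the class and method names here are illustrative only, not the project's API.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.stream.Collectors;

    // Illustrative only: condenses the id handling visible in CustomerSinkFunc and
    // ElasticsearchTableInfo; not the project's actual API.
    public class EsIdSketch {
        private static final String ID_VALUE_SPLIT = "_";

        // Parse the optional "id" property, e.g. "1,3", into 1-based field positions.
        // Non-numeric parts throw NumberFormatException here; the real table info
        // validates them up front with NumberUtils.isNumber instead.
        public static List<Integer> parseIdIndexes(String idProperty) {
            if (idProperty == null || idProperty.trim().isEmpty()) {
                return Collections.emptyList();
            }
            return Arrays.stream(idProperty.split(","))
                    .map(String::trim)
                    .map(Integer::valueOf)
                    .collect(Collectors.toList());
        }

        // Join the referenced row fields into a document id; null means "let ES generate it".
        public static String buildDocId(List<Integer> idIndexes, Object[] row) {
            String id = idIndexes.stream()
                    .filter(i -> i > 0 && i <= row.length)
                    .map(i -> String.valueOf(row[i - 1]))
                    .collect(Collectors.joining(ID_VALUE_SPLIT));
            return id.isEmpty() ? null : id;
        }

        public static void main(String[] args) {
            Object[] row = {"order-42", 3, "2020-03-04"};
            // prints: order-42_2020-03-04
            System.out.println(buildDocId(parseIdIndexes("1,3"), row));
        }
    }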
Preconditions.checkNotNull(index, "elasticsearch type of index is required"); Preconditions.checkNotNull(esType, "elasticsearch type of type is required"); - Preconditions.checkNotNull(id, "elasticsearch type of id is required"); Preconditions.checkNotNull(clusterName, "elasticsearch type of clusterName is required"); - Arrays.stream(StringUtils.split(id, ",")).forEach(number -> { - Preconditions.checkArgument(NumberUtils.isNumber(number), "id must be a numeric type"); - }); + if (!StringUtils.isEmpty(id)) { + Arrays.stream(StringUtils.split(id, ",")).forEach(number -> { + Preconditions.checkArgument(NumberUtils.isNumber(number), "id must be a numeric type"); + }); + } if (isAuthMesh()) { Preconditions.checkNotNull(userName, "elasticsearch type of userName is required"); diff --git a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java index cc650f670..2646c50e9 100644 --- a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java +++ b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java @@ -127,14 +127,6 @@ public void emitDataStream(DataStream> dataStream) { } } - public void setParallelism(int parallelism) { - this.parallelism = parallelism; - } - - public void setBulkFlushMaxActions(int bulkFlushMaxActions) { - this.bulkFlushMaxActions = bulkFlushMaxActions; - } - @Override public ElasticsearchSink genStreamSink(TargetTableInfo targetTableInfo) { esTableInfo = (ElasticsearchTableInfo) targetTableInfo; @@ -143,8 +135,8 @@ public ElasticsearchSink genStreamSink(TargetTableInfo targetTableInfo) { type = esTableInfo.getEsType(); columnTypes = esTableInfo.getFieldTypes(); esAddressList = Arrays.asList(esTableInfo.getAddress().split(",")); - String id = esTableInfo.getId(); + if (!StringUtils.isEmpty(id)) { idIndexList = Arrays.stream(StringUtils.split(id, ",")).map(Integer::valueOf).collect(Collectors.toList()); } diff --git a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java index 5709d609c..3cc3dd9ff 100644 --- a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java +++ b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java @@ -128,12 +128,13 @@ public boolean check() { Preconditions.checkNotNull(address, "elasticsearch6 type of address is required"); Preconditions.checkNotNull(index, "elasticsearch6 type of index is required"); Preconditions.checkNotNull(esType, "elasticsearch6 type of type is required"); - Preconditions.checkNotNull(id, "elasticsearch6 type of id is required"); Preconditions.checkNotNull(clusterName, "elasticsearch6 type of clusterName is required"); - Arrays.stream(StringUtils.split(id, ",")).forEach(number ->{ - Preconditions.checkArgument(NumberUtils.isNumber(number),"id must be a numeric type"); - }); + if (!StringUtils.isEmpty(id)) { + Arrays.stream(StringUtils.split(id, ",")).forEach(number -> { + Preconditions.checkArgument(NumberUtils.isNumber(number), "id must be a numeric type"); + }); + } if (isAuthMesh()) { 
Preconditions.checkNotNull(userName, "elasticsearch6 type of userName is required"); From 126a29564b43c6fafca7b0236e1e83228e42cccc Mon Sep 17 00:00:00 2001 From: dapeng Date: Wed, 4 Mar 2020 16:14:19 +0800 Subject: [PATCH 13/47] =?UTF-8?q?=E4=BB=A3=E7=A0=81=E4=BC=98=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../environment/StreamEnvConfigManager.java | 18 ++++++----- .../flink/sql/option/OptionParser.java | 4 +-- .../flink/sql/parser/CreateFuncParser.java | 8 ++--- .../flink/sql/side/SidePredicatesParser.java | 2 ++ .../dtstack/flink/sql/side/SideSQLParser.java | 4 +++ .../dtstack/flink/sql/side/SideSqlExec.java | 22 +++++++------ .../flink/sql/table/AbsSideTableParser.java | 2 +- .../com/dtstack/flink/sql/util/ClassUtil.java | 3 +- .../com/dtstack/flink/sql/util/DateUtil.java | 32 +++++++++---------- .../com/dtstack/flink/sql/util/JDBCUtils.java | 4 +-- 10 files changed, 56 insertions(+), 43 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java b/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java index ab74edabc..b453c9414 100644 --- a/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java +++ b/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java @@ -258,6 +258,8 @@ private static Optional createStateBackend(String backendType, Str checkpointDataUriEmptyCheck(checkpointDataUri, backendType); stateBackend = new RocksDBStateBackend(checkpointDataUri, BooleanUtils.toBoolean(backendIncremental)); break; + default: + break; } return stateBackend == null ? Optional.empty() : Optional.of(stateBackend); } @@ -317,14 +319,14 @@ private static void verityTtl(String ttlMintimeStr, String ttlMaxtimeStr) { * @return */ private static Long getTtlTime(Integer timeNumber, String timeUnit) { - if (timeUnit.equalsIgnoreCase("d")) { - return timeNumber * 1000l * 60 * 60 * 24; - } else if (timeUnit.equalsIgnoreCase("h")) { - return timeNumber * 1000l * 60 * 60; - } else if (timeUnit.equalsIgnoreCase("m")) { - return timeNumber * 1000l * 60; - } else if (timeUnit.equalsIgnoreCase("s")) { - return timeNumber * 1000l; + if ("d".equalsIgnoreCase(timeUnit)) { + return timeNumber * 1000L * 60 * 60 * 24; + } else if ("h".equalsIgnoreCase(timeUnit)) { + return timeNumber * 1000L * 60 * 60; + } else if ("m".equalsIgnoreCase(timeUnit)) { + return timeNumber * 1000L * 60; + } else if ("s".equalsIgnoreCase(timeUnit)) { + return timeNumber * 1000L; } else { throw new RuntimeException("not support " + timeNumber + timeUnit); } diff --git a/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java b/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java index 43f599d14..0e8bda1fe 100644 --- a/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java @@ -114,7 +114,7 @@ public List getProgramExeArgList() throws Exception { } public static void main(String[] args) throws Exception { - OptionParser OptionParser = new OptionParser(args); - System.out.println(OptionParser.getOptions()); + OptionParser optionParser = new OptionParser(args); + System.out.println(optionParser.getOptions()); } } diff --git a/core/src/main/java/com/dtstack/flink/sql/parser/CreateFuncParser.java b/core/src/main/java/com/dtstack/flink/sql/parser/CreateFuncParser.java index 670d98a7e..fc6a7e16c 100644 --- 
a/core/src/main/java/com/dtstack/flink/sql/parser/CreateFuncParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/parser/CreateFuncParser.java @@ -32,18 +32,18 @@ public class CreateFuncParser implements IParser { - private static final String funcPatternStr = "(?i)\\s*create\\s+(scala|table|aggregate)\\s+function\\s+(\\S+)\\s+WITH\\s+(\\S+)"; + private static final String FUNC_PATTERN_STR = "(?i)\\s*create\\s+(scala|table|aggregate)\\s+function\\s+(\\S+)\\s+WITH\\s+(\\S+)"; - private static final Pattern funcPattern = Pattern.compile(funcPatternStr); + private static final Pattern FUNC_PATTERN = Pattern.compile(FUNC_PATTERN_STR); @Override public boolean verify(String sql) { - return funcPattern.matcher(sql).find(); + return FUNC_PATTERN.matcher(sql).find(); } @Override public void parseSql(String sql, SqlTree sqlTree) { - Matcher matcher = funcPattern.matcher(sql); + Matcher matcher = FUNC_PATTERN.matcher(sql); if(matcher.find()){ String type = matcher.group(1); String funcName = matcher.group(2); diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SidePredicatesParser.java b/core/src/main/java/com/dtstack/flink/sql/side/SidePredicatesParser.java index 50103a9f5..64f9e646b 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SidePredicatesParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/SidePredicatesParser.java @@ -101,6 +101,8 @@ private void parseSql(SqlNode sqlNode, Map sideTableMap, parseSql(unionLeft, sideTableMap, tabMapping); parseSql(unionRight, sideTableMap, tabMapping); break; + default: + break; } } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java b/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java index c12bc981e..b7d886566 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java @@ -131,6 +131,8 @@ private void checkAndReplaceMultiJoin(SqlNode sqlNode, Set sideTableSet) checkAndReplaceMultiJoin(unionLeft, sideTableSet); checkAndReplaceMultiJoin(unionRight, sideTableSet); break; + default: + break; } } @@ -204,6 +206,8 @@ private Object parseSql(SqlNode sqlNode, Set sideTableSet, Queue case ORDER_BY: SqlOrderBy sqlOrderBy = (SqlOrderBy) sqlNode; parseSql(sqlOrderBy.query, sideTableSet, queueInfo, parentWhere, parentSelectList); + default: + break; } return ""; } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java b/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java index d54c87c17..1689949ca 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java @@ -90,7 +90,7 @@ public class SideSqlExec { private String tmpFields = null; - private SideSQLParser sideSQLParser = new SideSQLParser(); + private SideSQLParser sideSqlParser = new SideSQLParser(); private SidePredicatesParser sidePredicatesParser = new SidePredicatesParser(); private Map localTableCache = Maps.newHashMap(); @@ -109,8 +109,8 @@ public void exec(String sql, Map sideTableMap, StreamTabl LOG.error("fill predicates for sideTable fail ", e); } - sideSQLParser.setLocalTableCache(localTableCache); - Queue exeQueue = sideSQLParser.getExeQueue(sql, sideTableMap.keySet()); + sideSqlParser.setLocalTableCache(localTableCache); + Queue exeQueue = sideSqlParser.getExeQueue(sql, sideTableMap.keySet()); Object pollObj = null; //need clean @@ -140,7 +140,7 @@ public void exec(String sql, Map sideTableMap, StreamTabl LOG.info("exec sql: " + 
pollSqlNode.toString()); } }else if(pollSqlNode.getKind() == AS){ - AliasInfo aliasInfo = parseASNode(pollSqlNode); + AliasInfo aliasInfo = parseAsNode(pollSqlNode); Table table = tableEnv.sqlQuery(aliasInfo.getName()); tableEnv.registerTable(aliasInfo.getAlias(), table); localTableCache.put(aliasInfo.getAlias(), table); @@ -151,9 +151,9 @@ public void exec(String sql, Map sideTableMap, StreamTabl } } else if (pollSqlNode.getKind() == WITH_ITEM) { SqlWithItem sqlWithItem = (SqlWithItem) pollSqlNode; - String TableAlias = sqlWithItem.name.toString(); + String tableAlias = sqlWithItem.name.toString(); Table table = tableEnv.sqlQuery(sqlWithItem.query.toString()); - tableEnv.registerTable(TableAlias, table); + tableEnv.registerTable(tableAlias, table); } }else if (pollObj instanceof JoinInfo){ @@ -253,11 +253,13 @@ private void addAliasForFieldNode(SqlNode pollSqlNode, List fieldList, H } } break; + default: + break; } } - public AliasInfo parseASNode(SqlNode sqlNode) throws SqlParseException { + public AliasInfo parseAsNode(SqlNode sqlNode) throws SqlParseException { SqlKind sqlKind = sqlNode.getKind(); if(sqlKind != AS){ throw new RuntimeException(sqlNode + " is not 'as' operator"); @@ -487,6 +489,8 @@ public SqlNode filterNodeWithTargetName(SqlNode sqlNode, String targetTableName) }else{ return null; } + default: + break; } return null; @@ -709,7 +713,7 @@ public void registerTmpTable(CreateTmpTableParser.SqlParserResult result, } localTableCache.putAll(tableCache); - Queue exeQueue = sideSQLParser.getExeQueue(result.getExecSql(), sideTableMap.keySet()); + Queue exeQueue = sideSqlParser.getExeQueue(result.getExecSql(), sideTableMap.keySet()); Object pollObj = null; //need clean @@ -759,7 +763,7 @@ public void registerTmpTable(CreateTmpTableParser.SqlParserResult result, } protected void dealAsSourceTable(StreamTableEnvironment tableEnv, SqlNode pollSqlNode) throws SqlParseException { - AliasInfo aliasInfo = parseASNode(pollSqlNode); + AliasInfo aliasInfo = parseAsNode(pollSqlNode); if (localTableCache.containsKey(aliasInfo.getName())) { return; } diff --git a/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java index 520ca2138..49c5629bc 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java @@ -87,7 +87,7 @@ protected void parseCacheProp(SideTableInfo sideTableInfo, Map p if(props.containsKey(SideTableInfo.CACHE_MODE_KEY.toLowerCase())){ String cachemode = MathUtil.getString(props.get(SideTableInfo.CACHE_MODE_KEY.toLowerCase())); - if(!cachemode.equalsIgnoreCase("ordered") && !cachemode.equalsIgnoreCase("unordered")){ + if(!"ordered".equalsIgnoreCase(cachemode) && !"unordered".equalsIgnoreCase(cachemode)){ throw new RuntimeException("cachemode must ordered or unordered!"); } sideTableInfo.setCacheMode(cachemode.toLowerCase()); diff --git a/core/src/main/java/com/dtstack/flink/sql/util/ClassUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/ClassUtil.java index feebbe3f1..4bb4ff0d8 100644 --- a/core/src/main/java/com/dtstack/flink/sql/util/ClassUtil.java +++ b/core/src/main/java/com/dtstack/flink/sql/util/ClassUtil.java @@ -86,7 +86,8 @@ public static Class stringConvertClass(String str) { case "decimal": case "decimalunsigned": return BigDecimal.class; - + default: + break; } throw new RuntimeException("不支持 " + str + " 类型"); diff --git 
a/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java index 5bfa2f203..da5a42b57 100644 --- a/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java +++ b/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java @@ -116,9 +116,9 @@ public static long getTodayStart(long day) { * @return */ public static long getTodayStart(long day,String scope) { - if(scope.equals("MS")){ + if("MS".equals(scope)){ return getTodayStart(day)*1000; - }else if(scope.equals("S")){ + }else if("S".equals(scope)){ return getTodayStart(day); }else{ return getTodayStart(day); @@ -154,9 +154,9 @@ public static long getNextDayStart(long day) { * @return */ public static long getNextDayStart(long day,String scope) { - if(scope.equals("MS")){ + if("MS".equals(scope)){ return getNextDayStart(day)*1000; - }else if(scope.equals("S")){ + }else if("S".equals(scope)){ return getNextDayStart(day); }else{ return getNextDayStart(day); @@ -335,7 +335,7 @@ public static String get30DaysLaterByString(String day, String inFormat, String * @return String * @throws ParseException */ - public static String getDateStrTOFormat(String day, String inFormat, String outFormat) throws ParseException { + public static String getDateStrToFormat(String day, String inFormat, String outFormat) throws ParseException { SimpleDateFormat sdf = new SimpleDateFormat(inFormat); Date date = sdf.parse(day); Calendar calendar = Calendar.getInstance(); @@ -344,7 +344,7 @@ public static String getDateStrTOFormat(String day, String inFormat, String outF return dayBefore; } - public static long getDateMillTOFormat(String day, String inFormat) throws ParseException { + public static long getDateMillToFormat(String day, String inFormat) throws ParseException { SimpleDateFormat sdf = new SimpleDateFormat(inFormat); Date date = sdf.parse(day); Calendar calendar = Calendar.getInstance(); @@ -470,11 +470,11 @@ public static long getMillByDay(int severalDays,String condition) { if(condition==null){ return getMillToDay(cal,dateT); } - if(condition.equals("-")){ + if("-".equals(condition)){ dateT = (cal.get(Calendar.DATE) - severalDays); return getMillToDay(cal,dateT); } - if(condition.equals("+")){ + if("+".equals(condition)){ dateT = (cal.get(Calendar.DATE) + severalDays); return getMillToDay(cal,dateT); } @@ -490,11 +490,11 @@ public static long getStampByDay(int severalDays,String condition) { if(condition==null){ return getStampToDay(cal,dateT); } - if(condition.equals("-")){ + if("-".equals(condition)){ dateT = (cal.get(Calendar.DATE) - severalDays); return getStampToDay(cal,dateT); } - if(condition.equals("+")){ + if("+".equals(condition)){ dateT = (cal.get(Calendar.DATE) + severalDays); return getStampToDay(cal,dateT); } @@ -575,8 +575,8 @@ public static String getDate(Date date, String format) { */ public static long stringToLong(String day, String format) throws ParseException { SimpleDateFormat dateFormat = new SimpleDateFormat(format); - long Date = dateFormat.parse(day).getTime(); - return Date; + long date = dateFormat.parse(day).getTime(); + return date; } /** @@ -588,8 +588,8 @@ public static long stringToLong(String day, String format) throws ParseException public static Date stringToDate(String day, String format) { try { SimpleDateFormat dateFormat = new SimpleDateFormat(format); - Date Date = dateFormat.parse(day); - return Date; + Date date = dateFormat.parse(day); + return date; } catch (ParseException e) { return new Date(); } @@ -608,8 +608,8 @@ public static String 
longToString(long day, String format) throws ParseException day=day*1000; } SimpleDateFormat dateFormat = new SimpleDateFormat(format); - String Date = dateFormat.format(day); - return Date; + String date = dateFormat.format(day); + return date; } /** diff --git a/core/src/main/java/com/dtstack/flink/sql/util/JDBCUtils.java b/core/src/main/java/com/dtstack/flink/sql/util/JDBCUtils.java index fde2f166e..3d66d8a6d 100644 --- a/core/src/main/java/com/dtstack/flink/sql/util/JDBCUtils.java +++ b/core/src/main/java/com/dtstack/flink/sql/util/JDBCUtils.java @@ -28,10 +28,10 @@ public class JDBCUtils { private static final Logger LOG = LoggerFactory.getLogger(ClassUtil.class); - public final static String lock_str = "jdbc_lock_str"; + public final static String LOCK_STR = "jdbc_lock_str"; public static void forName(String clazz, ClassLoader classLoader) { - synchronized (lock_str){ + synchronized (LOCK_STR){ try { Class.forName(clazz, true, classLoader); DriverManager.setLoginTimeout(10); From e0f289be82541f7f630e91f9506f8be08ef63ae4 Mon Sep 17 00:00:00 2001 From: dapeng Date: Wed, 4 Mar 2020 16:47:04 +0800 Subject: [PATCH 14/47] codeview fix --- .../flink/sql/sink/console/table/TablePrintUtil.java | 12 +++++++++--- .../flink/sql/launcher/ClusterClientFactory.java | 4 ++-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java index 8813da619..39abb81b4 100644 --- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java +++ b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java @@ -58,7 +58,9 @@ public static TablePrintUtil build(String[][] data) { public static TablePrintUtil build(List data) { TablePrintUtil self = new TablePrintUtil(); self.data = new ArrayList<>(); - if (data.size() <= 0) throw new RuntimeException("数据源至少得有一行吧"); + if (data.size() <= 0) { + throw new RuntimeException("数据源至少得有一行吧"); + } Object obj = data.get(0); @@ -70,7 +72,9 @@ public static TablePrintUtil build(List data) { int length = ((List) obj).size(); for (Object item : data) { List col = (List) item; - if (col.size() != length) throw new RuntimeException("数据源每列长度必须一致"); + if (col.size() != length) { + throw new RuntimeException("数据源每列长度必须一致"); + } self.data.add(col.toArray(new String[length])); } } else { @@ -163,7 +167,9 @@ private int[] getColLengths() { if (equilong) {//如果等宽表格 int max = 0; for (int len : result) { - if (len > max) max = len; + if (len > max) { + max = len; + } } for (int i = 0; i < result.length; i++) { result[i] = max; diff --git a/launcher/src/main/java/com/dtstack/flink/sql/launcher/ClusterClientFactory.java b/launcher/src/main/java/com/dtstack/flink/sql/launcher/ClusterClientFactory.java index 14cd847b5..7f4ded520 100644 --- a/launcher/src/main/java/com/dtstack/flink/sql/launcher/ClusterClientFactory.java +++ b/launcher/src/main/java/com/dtstack/flink/sql/launcher/ClusterClientFactory.java @@ -89,7 +89,7 @@ public static ClusterClient createYarnSessionClient(Options launcherOptions) { if (StringUtils.isNotBlank(yarnConfDir)) { try { - config.setString(ConfigConstants.PATH_HADOOP_CONFIG, yarnConfDir); + config.setString("fs.hdfs.hadoopconf", yarnConfDir); FileSystem.initialize(config); YarnConfiguration yarnConf = YarnConfLoader.getYarnConf(yarnConfDir); @@ -166,7 +166,7 @@ private static ApplicationId 
getYarnClusterApplicationId(YarnClient yarnClient) private static ApplicationId toApplicationId(String appIdStr) { Iterator it = StringHelper._split(appIdStr).iterator(); - if (!(it.next()).equals("application")) { + if (!"application".equals(it.next())) { throw new IllegalArgumentException("Invalid ApplicationId prefix: " + appIdStr + ". The valid ApplicationId should start with prefix " + "application"); } else { try { From e51f1cfdf5ccf41410998242c8f78a22d0875028 Mon Sep 17 00:00:00 2001 From: dapeng Date: Wed, 4 Mar 2020 17:04:12 +0800 Subject: [PATCH 15/47] codereview --- .../flink/sql/side/cassandra/table/CassandraSideParser.java | 3 +++ .../flink/sql/side/clickhouse/ClickhouseAllReqRow.java | 6 +++--- .../flink/sql/sink/console/table/TablePrintUtil.java | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/cassandra/cassandra-side/cassandra-side-core/src/main/java/com/dtstack/flink/sql/side/cassandra/table/CassandraSideParser.java b/cassandra/cassandra-side/cassandra-side-core/src/main/java/com/dtstack/flink/sql/side/cassandra/table/CassandraSideParser.java index 62dd753b9..d2b93563b 100644 --- a/cassandra/cassandra-side/cassandra-side-core/src/main/java/com/dtstack/flink/sql/side/cassandra/table/CassandraSideParser.java +++ b/cassandra/cassandra-side/cassandra-side-core/src/main/java/com/dtstack/flink/sql/side/cassandra/table/CassandraSideParser.java @@ -99,6 +99,7 @@ public TableInfo getTableInfo(String tableName, String fieldsInfo, Map getColList(Object obj) { Method[] methods = obj.getClass().getMethods(); for (Method m : methods) { StringBuilder getMethodName = new StringBuilder(m.getName()); - if (getMethodName.substring(0, 3).equals("get") && !m.getName().equals("getClass")) { + if ("get".equals(getMethodName.substring(0, 3)) && !"getClass".equals(m.getName())) { Col col = new Col(); col.getMethodName = getMethodName.toString(); char first = Character.toLowerCase(getMethodName.delete(0, 3).charAt(0)); From 841c1fe931d0e1bdb838b4eea9034123a5b61e81 Mon Sep 17 00:00:00 2001 From: zoudaokoulife Date: Wed, 4 Mar 2020 18:12:19 +0800 Subject: [PATCH 16/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E5=90=88=E5=B9=B6?= =?UTF-8?q?=E5=86=B2=E7=AA=81=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../flink/sql/exec/ExecuteProcessHelper.java | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java index d2c2926ef..5cd1248e2 100644 --- a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java +++ b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java @@ -188,17 +188,23 @@ public static List getExternalJarUrls(String addJarListStr) throws java.io. 
return jarUrlList; } - public static void sqlTranslation(String localSqlPluginPath, StreamTableEnvironment tableEnv, SqlTree sqlTree, Map sideTableMap, Map registerTableCache, StreamQueryConfig queryConfig) throws Exception { + private static void sqlTranslation(String localSqlPluginPath, + StreamTableEnvironment tableEnv, + SqlTree sqlTree,Map sideTableMap, + Map registerTableCache, + StreamQueryConfig queryConfig) throws Exception { + SideSqlExec sideSqlExec = new SideSqlExec(); sideSqlExec.setLocalSqlPluginPath(localSqlPluginPath); for (CreateTmpTableParser.SqlParserResult result : sqlTree.getTmpSqlList()) { - sideSqlExec.registerTmpTable(result, sideTableMap, tableEnv, registerTableCache); + sideSqlExec.exec(result.getExecSql(), sideTableMap, tableEnv, registerTableCache, queryConfig, result); } for (InsertSqlParser.SqlParseResult result : sqlTree.getExecSqlList()) { if (LOG.isInfoEnabled()) { LOG.info("exe-sql:\n" + result.getExecSql()); } + boolean isSide = false; for (String tableName : result.getTargetTableList()) { if (sqlTree.getTmpTableMap().containsKey(tableName)) { @@ -208,7 +214,7 @@ public static void sqlTranslation(String localSqlPluginPath, StreamTableEnvironm SqlNode sqlNode = org.apache.calcite.sql.parser.SqlParser.create(realSql, CalciteConfig.MYSQL_LEX_CONFIG).parseStmt(); String tmpSql = ((SqlInsert) sqlNode).getSource().toString(); tmp.setExecSql(tmpSql); - sideSqlExec.registerTmpTable(tmp, sideTableMap, tableEnv, registerTableCache); + sideSqlExec.exec(tmp.getExecSql(), sideTableMap, tableEnv, registerTableCache, queryConfig, tmp); } else { for (String sourceTable : result.getSourceTableList()) { if (sideTableMap.containsKey(sourceTable)) { @@ -218,10 +224,14 @@ public static void sqlTranslation(String localSqlPluginPath, StreamTableEnvironm } if (isSide) { //sql-dimensional table contains the dimension table of execution - sideSqlExec.exec(result.getExecSql(), sideTableMap, tableEnv, registerTableCache, queryConfig); + sideSqlExec.exec(result.getExecSql(), sideTableMap, tableEnv, registerTableCache, queryConfig, null); } else { + System.out.println("----------exec sql without dimension join-----------"); + System.out.println("----------real sql exec is--------------------------"); + System.out.println(result.getExecSql()); FlinkSQLExec.sqlUpdate(tableEnv, result.getExecSql(), queryConfig); if (LOG.isInfoEnabled()) { + System.out.println(); LOG.info("exec sql: " + result.getExecSql()); } } From f56801c3180bef5347adced6a352dd6e190c7f2d Mon Sep 17 00:00:00 2001 From: dapeng Date: Wed, 4 Mar 2020 22:41:53 +0800 Subject: [PATCH 17/47] code review --- .../side/cassandra/CassandraAllReqRow.java | 10 ++- .../side/cassandra/CassandraAllSideInfo.java | 10 +-- .../side/cassandra/CassandraAsyncReqRow.java | 9 ++- .../cassandra/CassandraAsyncSideInfo.java | 10 +-- .../cassandra/table/CassandraSideParser.java | 14 ++--- .../table/CassandraSideTableInfo.java | 4 +- .../sink/cassandra/CassandraOutputFormat.java | 4 +- .../sql/sink/cassandra/CassandraSink.java | 4 +- .../cassandra/table/CassandraSinkParser.java | 10 +-- .../cassandra/table/CassandraTableInfo.java | 4 +- .../side/clickhouse/ClickhouseAllReqRow.java | 4 +- .../clickhouse/ClickhouseAllSideInfo.java | 4 +- .../clickhouse/ClickhouseAsyncReqRow.java | 4 +- .../clickhouse/ClickhouseAsyncSideInfo.java | 4 +- .../table/ClickhouseSideParser.java | 6 +- .../table/ClickhouseSinkParser.java | 6 +- .../sql/sink/console/ConsoleOutputFormat.java | 4 +- .../flink/sql/sink/console/ConsoleSink.java | 4 +- 
.../sink/console/table/ConsoleSinkParser.java | 10 +-- .../sink/console/table/ConsoleTableInfo.java | 4 +- .../flink/sql/constrant/ConfigConstrant.java | 6 +- .../dtstack/flink/sql/enums/ColumnType.java | 63 +++++++++++++++++-- .../flink/sql/enums/ECacheContentType.java | 9 +++ .../dtstack/flink/sql/enums/ECacheType.java | 13 +++- .../flink/sql/enums/EDatabaseType.java | 9 +++ .../flink/sql/enums/EPluginLoadMode.java | 6 ++ .../flink/sql/enums/EStateBackend.java | 9 +++ .../environment/StreamEnvConfigManager.java | 6 +- .../flink/sql/exec/ExecuteProcessHelper.java | 36 +++++------ .../DtNestRowDeserializationSchema.java | 8 +-- .../flink/sql/option/OptionParser.java | 2 +- ...t.java => AbstractDtRichOutputFormat.java} | 2 +- .../dtstack/flink/sql/parser/SqlParser.java | 12 ++-- .../com/dtstack/flink/sql/parser/SqlTree.java | 8 +-- ...leInfo.java => AbstractSideTableInfo.java} | 4 +- .../{AllReqRow.java => BaseAllReqRow.java} | 8 +-- ...{AsyncReqRow.java => BaseAsyncReqRow.java} | 14 ++--- .../side/{SideInfo.java => BaseSideInfo.java} | 22 +++---- .../flink/sql/side/SidePredicatesParser.java | 7 +-- .../dtstack/flink/sql/side/SideSqlExec.java | 14 ++--- .../flink/sql/side/StreamSideFactory.java | 10 +-- ...sSideCache.java => AbstractSideCache.java} | 8 +-- .../flink/sql/side/cache/LRUSideCache.java | 6 +- .../sql/side/operator/SideAsyncOperator.java | 16 ++--- .../operator/SideWithAllCacheOperator.java | 18 +++--- .../flink/sql/sink/IStreamSinkGener.java | 4 +- .../flink/sql/sink/StreamSinkFactory.java | 12 ++-- .../flink/sql/source/IStreamSourceGener.java | 4 +- .../flink/sql/source/StreamSourceFactory.java | 12 ++-- ...rser.java => AbstractSideTableParser.java} | 38 +++++------ ...eParser.java => AbstractSourceParser.java} | 16 ++--- ...Info.java => AbstractSourceTableInfo.java} | 2 +- ...{TableInfo.java => AbstractTableInfo.java} | 2 +- ...rser.java => AbstractTableInfoParser.java} | 18 +++--- ...leParser.java => AbstractTableParser.java} | 16 ++--- ...Info.java => AbstractTargetTableInfo.java} | 2 +- .../sql/table/ITableFieldDealHandler.java | 2 +- .../com/dtstack/flink/sql/util/DateUtil.java | 1 - .../dtstack/flink/sql/util/PluginUtil.java | 10 +-- ....java => AbstractCustomerWaterMarker.java} | 4 +- .../CustomerWaterMarkerForLong.java | 2 +- .../CustomerWaterMarkerForTimeStamp.java | 3 +- .../sql/watermarker/WaterMarkerAssigner.java | 8 +-- .../sql/side/SidePredicatesParserTest.java | 4 +- .../flink/sql/side/db2/Db2AllReqRow.java | 8 +-- .../flink/sql/side/db2/Db2AllSideInfo.java | 4 +- .../flink/sql/side/db2/Db2AsyncReqRow.java | 4 +- .../flink/sql/side/db2/Db2AsyncSideInfo.java | 4 +- .../sql/side/db2/table/Db2SideParser.java | 6 +- .../flink/sql/sink/db/table/DbSinkParser.java | 6 +- .../sink/elasticsearch/ElasticsearchSink.java | 4 +- .../table/ElasticsearchSinkParser.java | 8 +-- .../table/ElasticsearchTableInfo.java | 4 +- .../flink/sql/side/hbase/HbaseAllReqRow.java | 8 +-- .../sql/side/hbase/HbaseAllSideInfo.java | 10 +-- .../sql/side/hbase/HbaseAsyncReqRow.java | 14 ++--- .../sql/side/hbase/HbaseAsyncSideInfo.java | 10 +-- ...ler.java => AbstractRowKeyModeDealer.java} | 12 ++-- .../PreRowKeyModeDealerDealer.java | 9 ++- .../rowkeydealer/RowKeyEqualModeDealer.java | 6 +- ...ator.java => AbstractReplaceOperator.java} | 4 +- .../sql/side/hbase/Md5ReplaceOperator.java | 2 +- .../sql/side/hbase/enums/EReplaceOpType.java | 9 ++- .../sql/side/hbase/enums/EReplaceType.java | 6 ++ .../sql/side/hbase/table/HbaseSideParser.java | 12 ++-- .../side/hbase/table/HbaseSideTableInfo.java | 4 
+- .../sql/side/hbase/utils/HbaseUtils.java | 2 + .../sql/sink/hbase/HbaseOutputFormat.java | 4 +- .../flink/sql/sink/hbase/HbaseSink.java | 5 +- .../sql/sink/hbase/table/HbaseSinkParser.java | 10 +-- .../sql/sink/hbase/table/HbaseTableInfo.java | 4 +- hbase/pom.xml | 7 +++ .../sql/side/impala/ImpalaAllReqRow.java | 6 +- .../sql/side/impala/ImpalaAllSideInfo.java | 7 +-- .../sql/side/impala/ImpalaAsyncReqRow.java | 4 +- .../sql/side/impala/ImpalaAsyncSideInfo.java | 6 +- .../side/impala/table/ImpalaSideParser.java | 7 ++- .../flink/sql/sink/impala/ImpalaSink.java | 4 +- .../sink/impala/table/ImpalaSinkParser.java | 7 ++- .../sql/sink/kafka/table/KafkaSinkParser.java | 8 +-- .../sink/kafka/table/KafkaSinkTableInfo.java | 5 +- .../source/kafka/table/KafkaSourceParser.java | 10 +-- .../kafka/table/KafkaSourceTableInfo.java | 4 +- .../flink/sql/sink/kafka/KafkaSink.java | 4 +- .../flink/sql/source/kafka/KafkaSource.java | 6 +- .../flink/sql/sink/kafka/KafkaSink.java | 4 +- .../flink/sql/source/kafka/KafkaSource.java | 6 +- .../flink/sql/sink/kafka/KafkaSink.java | 4 +- .../flink/sql/source/kafka/KafkaSource.java | 6 +- .../flink/sql/sink/kafka/KafkaSink.java | 4 +- .../flink/sql/source/kafka/KafkaSource.java | 6 +- .../flink/sql/side/kudu/KuduAllReqRow.java | 8 +-- .../flink/sql/side/kudu/KuduAllSideInfo.java | 10 +-- .../flink/sql/side/kudu/KuduAsyncReqRow.java | 4 +- .../sql/side/kudu/KuduAsyncSideInfo.java | 10 +-- .../sql/side/kudu/table/KuduSideParser.java | 10 +-- .../side/kudu/table/KuduSideTableInfo.java | 4 +- .../flink/sql/sink/kudu/KuduOutputFormat.java | 4 +- .../dtstack/flink/sql/sink/kudu/KuduSink.java | 4 +- .../sql/sink/kudu/table/KuduSinkParser.java | 10 +-- .../sql/sink/kudu/table/KuduTableInfo.java | 4 +- .../flink/sql/side/mongo/MongoAllReqRow.java | 8 +-- .../sql/side/mongo/MongoAllSideInfo.java | 10 +-- .../sql/side/mongo/MongoAsyncReqRow.java | 14 ++--- .../sql/side/mongo/MongoAsyncSideInfo.java | 13 ++-- .../sql/side/mongo/table/MongoSideParser.java | 12 ++-- .../side/mongo/table/MongoSideTableInfo.java | 4 +- .../sql/sink/mongo/MongoOutputFormat.java | 4 +- .../flink/sql/sink/mongo/MongoSink.java | 4 +- .../sql/sink/mongo/table/MongoSinkParser.java | 10 +-- .../sql/sink/mongo/table/MongoTableInfo.java | 4 +- .../flink/sql/side/mysql/MysqlAllReqRow.java | 4 +- .../sql/side/mysql/MysqlAllSideInfo.java | 4 +- .../sql/side/mysql/MysqlAsyncReqRow.java | 4 +- .../sql/side/mysql/MysqlAsyncSideInfo.java | 4 +- .../sql/side/mysql/table/MysqlSideParser.java | 6 +- .../sql/sink/mysql/table/MysqlSinkParser.java | 6 +- .../sql/side/oracle/OracleAllReqRow.java | 4 +- .../sql/side/oracle/OracleAllSideInfo.java | 5 +- .../sql/side/oracle/OracleAsyncReqRow.java | 4 +- .../sql/side/oracle/OracleAsyncSideInfo.java | 8 +-- .../side/oracle/table/OracleSideParser.java | 6 +- .../sink/oracle/table/OracleSinkParser.java | 6 +- .../sql/side/polardb/PolardbAllReqRow.java | 4 +- .../sql/side/polardb/PolardbAllSideInfo.java | 4 +- .../sql/side/polardb/PolardbAsyncReqRow.java | 4 +- .../side/polardb/PolardbAsyncSideInfo.java | 4 +- .../side/polardb/table/PolardbSideParser.java | 6 +- .../sink/polardb/table/PolardbSinkParser.java | 6 +- .../side/postgresql/PostgresqlAllReqRow.java | 4 +- .../postgresql/PostgresqlAllSideInfo.java | 4 +- .../postgresql/PostgresqlAsyncReqRow.java | 4 +- .../postgresql/PostgresqlAsyncSideInfo.java | 4 +- .../table/PostgresqlSideParser.java | 6 +- .../table/PostgresqlSinkParser.java | 6 +- .../side/rdb/all/AbstractRdbAllReqRow.java | 8 +-- 
.../sql/side/rdb/all/RdbAllSideInfo.java | 10 +-- .../sql/side/rdb/async/RdbAsyncReqRow.java | 8 +-- .../sql/side/rdb/async/RdbAsyncSideInfo.java | 11 ++-- .../sql/side/rdb/table/RdbSideParser.java | 8 +-- .../sql/side/rdb/table/RdbSideTableInfo.java | 4 +- .../flink/sql/sink/rdb/AbstractRdbSink.java | 4 +- .../rdb/format/AbstractJDBCOutputFormat.java | 4 +- .../sql/sink/rdb/table/RdbSinkParser.java | 9 ++- .../sql/sink/rdb/table/RdbTableInfo.java | 6 +- .../sink/rdb/writer/AbstractUpsertWriter.java | 12 ++-- .../sql/sink/rdb/writer/AppendOnlyWriter.java | 6 +- .../flink/sql/side/redis/RedisAllReqRow.java | 4 +- .../sql/side/redis/RedisAllSideInfo.java | 10 +-- .../sql/side/redis/RedisAsyncReqRow.java | 4 +- .../sql/side/redis/RedisAsyncSideInfo.java | 10 +-- .../sql/side/redis/table/RedisSideParser.java | 8 +-- .../sql/side/redis/table/RedisSideReqRow.java | 8 +-- .../side/redis/table/RedisSideTableInfo.java | 4 +- .../sql/sink/redis/RedisOutputFormat.java | 4 +- .../flink/sql/sink/redis/RedisSink.java | 4 +- .../sql/sink/redis/table/RedisSinkParser.java | 8 +-- .../sql/sink/redis/table/RedisTableInfo.java | 4 +- .../CustomerSocketTextStreamFunction.java | 4 +- .../serversocket/ServersocketSource.java | 4 +- .../table/ServersocketSourceParser.java | 8 +-- .../table/ServersocketSourceTableInfo.java | 4 +- .../side/sqlserver/SqlserverAllReqRow.java | 4 +- .../side/sqlserver/SqlserverAllSideInfo.java | 4 +- .../side/sqlserver/SqlserverAsyncReqRow.java | 2 +- .../sqlserver/SqlserverAsyncSideInfo.java | 4 +- .../sqlserver/table/SqlserverSideParser.java | 6 +- .../sqlserver/table/SqlserverSinkParser.java | 6 +- 188 files changed, 731 insertions(+), 640 deletions(-) rename core/src/main/java/com/dtstack/flink/sql/outputformat/{DtRichOutputFormat.java => AbstractDtRichOutputFormat.java} (95%) rename core/src/main/java/com/dtstack/flink/sql/side/{SideTableInfo.java => AbstractSideTableInfo.java} (96%) rename core/src/main/java/com/dtstack/flink/sql/side/{AllReqRow.java => BaseAllReqRow.java} (88%) rename core/src/main/java/com/dtstack/flink/sql/side/{AsyncReqRow.java => BaseAsyncReqRow.java} (91%) rename core/src/main/java/com/dtstack/flink/sql/side/{SideInfo.java => BaseSideInfo.java} (91%) rename core/src/main/java/com/dtstack/flink/sql/side/cache/{AbsSideCache.java => AbstractSideCache.java} (84%) rename core/src/main/java/com/dtstack/flink/sql/table/{AbsSideTableParser.java => AbstractSideTableParser.java} (71%) rename core/src/main/java/com/dtstack/flink/sql/table/{AbsSourceParser.java => AbstractSourceParser.java} (81%) rename core/src/main/java/com/dtstack/flink/sql/table/{SourceTableInfo.java => AbstractSourceTableInfo.java} (97%) rename core/src/main/java/com/dtstack/flink/sql/table/{TableInfo.java => AbstractTableInfo.java} (98%) rename core/src/main/java/com/dtstack/flink/sql/table/{TableInfoParser.java => AbstractTableInfoParser.java} (85%) rename core/src/main/java/com/dtstack/flink/sql/table/{AbsTableParser.java => AbstractTableParser.java} (90%) rename core/src/main/java/com/dtstack/flink/sql/table/{TargetTableInfo.java => AbstractTargetTableInfo.java} (94%) rename core/src/main/java/com/dtstack/flink/sql/watermarker/{AbsCustomerWaterMarker.java => AbstractCustomerWaterMarker.java} (94%) rename hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/{AbsRowKeyModeDealer.java => AbstractRowKeyModeDealer.java} (87%) rename hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/{ReplaceOperator.java => 
AbstractReplaceOperator.java} (93%) diff --git a/cassandra/cassandra-side/cassandra-all-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAllReqRow.java b/cassandra/cassandra-side/cassandra-all-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAllReqRow.java index e30b6dfc8..6d1953bfc 100644 --- a/cassandra/cassandra-side/cassandra-all-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAllReqRow.java +++ b/cassandra/cassandra-side/cassandra-all-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAllReqRow.java @@ -28,10 +28,10 @@ import com.datastax.driver.core.SocketOptions; import com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy; import com.datastax.driver.core.policies.RetryPolicy; -import com.dtstack.flink.sql.side.AllReqRow; +import com.dtstack.flink.sql.side.BaseAllReqRow; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.cassandra.table.CassandraSideTableInfo; import org.apache.calcite.sql.JoinType; import org.apache.commons.collections.CollectionUtils; @@ -60,14 +60,12 @@ * * @author xuqianjin */ -public class CassandraAllReqRow extends AllReqRow { +public class CassandraAllReqRow extends BaseAllReqRow { private static final long serialVersionUID = 54015343561288219L; private static final Logger LOG = LoggerFactory.getLogger(CassandraAllReqRow.class); - private static final String cassandra_DRIVER = "com.cassandra.jdbc.Driver"; - private static final int CONN_RETRY_NUM = 3; private static final int FETCH_SIZE = 1000; @@ -77,7 +75,7 @@ public class CassandraAllReqRow extends AllReqRow { private AtomicReference>>> cacheRef = new AtomicReference<>(); - public CassandraAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public CassandraAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new com.dtstack.flink.sql.side.cassandra.CassandraAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/cassandra/cassandra-side/cassandra-all-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAllSideInfo.java b/cassandra/cassandra-side/cassandra-all-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAllSideInfo.java index fa665f9a0..5d95dc9bf 100644 --- a/cassandra/cassandra-side/cassandra-all-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAllSideInfo.java +++ b/cassandra/cassandra-side/cassandra-all-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAllSideInfo.java @@ -20,8 +20,8 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.cassandra.table.CassandraSideTableInfo; import com.dtstack.flink.sql.util.ParseUtils; import org.apache.calcite.sql.SqlNode; @@ -37,16 +37,16 @@ * * @author xuqianjin */ -public class CassandraAllSideInfo extends SideInfo { +public class CassandraAllSideInfo extends BaseSideInfo { private static final long serialVersionUID = -8690814317653033557L; - public CassandraAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List 
outFieldInfoList, SideTableInfo sideTableInfo) { + public CassandraAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { CassandraSideTableInfo cassandraSideTableInfo = (CassandraSideTableInfo) sideTableInfo; sqlCondition = "select ${selectField} from ${tableName} "; diff --git a/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncReqRow.java b/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncReqRow.java index a60116e34..3020a73d3 100644 --- a/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncReqRow.java +++ b/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncReqRow.java @@ -30,11 +30,11 @@ import com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy; import com.datastax.driver.core.policies.RetryPolicy; import com.dtstack.flink.sql.enums.ECacheContentType; -import com.dtstack.flink.sql.side.AsyncReqRow; +import com.dtstack.flink.sql.side.BaseAsyncReqRow; import com.dtstack.flink.sql.side.CacheMissVal; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.cache.CacheObj; import com.dtstack.flink.sql.side.cassandra.table.CassandraSideTableInfo; import com.google.common.base.Function; @@ -56,7 +56,6 @@ import java.net.InetAddress; import java.sql.Timestamp; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -66,7 +65,7 @@ * * @author xuqianjin */ -public class CassandraAsyncReqRow extends AsyncReqRow { +public class CassandraAsyncReqRow extends BaseAsyncReqRow { private static final long serialVersionUID = 6631584128079864735L; @@ -82,7 +81,7 @@ public class CassandraAsyncReqRow extends AsyncReqRow { private transient ListenableFuture session; private transient CassandraSideTableInfo cassandraSideTableInfo; - public CassandraAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public CassandraAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new com.dtstack.flink.sql.side.cassandra.CassandraAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncSideInfo.java b/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncSideInfo.java index 3557f0f73..82055b94c 100644 --- a/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncSideInfo.java +++ b/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncSideInfo.java @@ -20,8 +20,8 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import 
com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.cassandra.table.CassandraSideTableInfo; import com.dtstack.flink.sql.util.ParseUtils; import org.apache.calcite.sql.SqlBasicCall; @@ -39,16 +39,16 @@ * * @author xuqianjin */ -public class CassandraAsyncSideInfo extends SideInfo { +public class CassandraAsyncSideInfo extends BaseSideInfo { private static final long serialVersionUID = -4403313049809013362L; - public CassandraAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public CassandraAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { CassandraSideTableInfo cassandraSideTableInfo = (CassandraSideTableInfo) sideTableInfo; String sideTableName = joinInfo.getSideTableName(); diff --git a/cassandra/cassandra-side/cassandra-side-core/src/main/java/com/dtstack/flink/sql/side/cassandra/table/CassandraSideParser.java b/cassandra/cassandra-side/cassandra-side-core/src/main/java/com/dtstack/flink/sql/side/cassandra/table/CassandraSideParser.java index d2b93563b..e893e56b0 100644 --- a/cassandra/cassandra-side/cassandra-side-core/src/main/java/com/dtstack/flink/sql/side/cassandra/table/CassandraSideParser.java +++ b/cassandra/cassandra-side/cassandra-side-core/src/main/java/com/dtstack/flink/sql/side/cassandra/table/CassandraSideParser.java @@ -19,18 +19,16 @@ package com.dtstack.flink.sql.side.cassandra.table; -import com.dtstack.flink.sql.table.AbsSideTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractSideTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; -import java.math.BigDecimal; -import java.sql.Date; import java.sql.Timestamp; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; -import static com.dtstack.flink.sql.table.TableInfo.PARALLELISM_KEY; +import static com.dtstack.flink.sql.table.AbstractTableInfo.PARALLELISM_KEY; /** * Reason: @@ -38,7 +36,7 @@ * * @author xuqianjin */ -public class CassandraSideParser extends AbsSideTableParser { +public class CassandraSideParser extends AbstractSideTableParser { private final static String SIDE_SIGN_KEY = "sideSignKey"; @@ -73,7 +71,7 @@ public CassandraSideParser() { } @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { com.dtstack.flink.sql.side.cassandra.table.CassandraSideTableInfo cassandraSideTableInfo = new com.dtstack.flink.sql.side.cassandra.table.CassandraSideTableInfo(); cassandraSideTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, cassandraSideTableInfo); @@ -96,7 +94,7 @@ public TableInfo getTableInfo(String tableName, String fieldsInfo, Map { +public class CassandraOutputFormat extends AbstractDtRichOutputFormat { private static final long serialVersionUID = -7994311331389155692L; private static final Logger LOG = LoggerFactory.getLogger(CassandraOutputFormat.class); diff --git 
a/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/CassandraSink.java b/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/CassandraSink.java index eb7b23b53..26152a7d3 100644 --- a/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/CassandraSink.java +++ b/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/CassandraSink.java @@ -22,7 +22,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.cassandra.table.CassandraTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -63,7 +63,7 @@ public CassandraSink() { } @Override - public CassandraSink genStreamSink(TargetTableInfo targetTableInfo) { + public CassandraSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { CassandraTableInfo cassandraTableInfo = (CassandraTableInfo) targetTableInfo; this.address = cassandraTableInfo.getAddress(); this.tableName = cassandraTableInfo.getTableName(); diff --git a/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/table/CassandraSinkParser.java b/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/table/CassandraSinkParser.java index 4c68e71ae..9ef8639ba 100644 --- a/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/table/CassandraSinkParser.java +++ b/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/table/CassandraSinkParser.java @@ -19,13 +19,13 @@ package com.dtstack.flink.sql.sink.cassandra.table; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; -import static com.dtstack.flink.sql.table.TableInfo.PARALLELISM_KEY; +import static com.dtstack.flink.sql.table.AbstractTableInfo.PARALLELISM_KEY; /** * Reason: @@ -33,7 +33,7 @@ * * @author xuqianjin */ -public class CassandraSinkParser extends AbsTableParser { +public class CassandraSinkParser extends AbstractTableParser { public static final String ADDRESS_KEY = "address"; @@ -60,7 +60,7 @@ public class CassandraSinkParser extends AbsTableParser { public static final String POOL_TIMEOUT_MILLIS_KEY = "poolTimeoutMillis"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { CassandraTableInfo cassandraTableInfo = new CassandraTableInfo(); cassandraTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, cassandraTableInfo); diff --git a/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/table/CassandraTableInfo.java b/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/table/CassandraTableInfo.java index c6626c42a..ffb5fa876 100644 --- a/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/table/CassandraTableInfo.java +++ b/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/table/CassandraTableInfo.java @@ -19,7 +19,7 @@ package com.dtstack.flink.sql.sink.cassandra.table; -import com.dtstack.flink.sql.table.TargetTableInfo; +import 
com.dtstack.flink.sql.table.AbstractTargetTableInfo; import com.google.common.base.Preconditions; /** @@ -28,7 +28,7 @@ * * @author xuqianjin */ -public class CassandraTableInfo extends TargetTableInfo { +public class CassandraTableInfo extends AbstractTargetTableInfo { private static final String CURR_TYPE = "cassandra"; diff --git a/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllReqRow.java b/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllReqRow.java index bd3d033ff..c9a0c447b 100644 --- a/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllReqRow.java +++ b/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllReqRow.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.JDBCUtils; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -37,7 +37,7 @@ public class ClickhouseAllReqRow extends AbstractRdbAllReqRow { private static final String CLICKHOUSE_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver"; - public ClickhouseAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public ClickhouseAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new ClickhouseAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllSideInfo.java b/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllSideInfo.java index 973c069b9..43fbeaa56 100644 --- a/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllSideInfo.java +++ b/clickhouse/clickhouse-side/clickhouse-all-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAllSideInfo.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.RdbAllSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -28,7 +28,7 @@ public class ClickhouseAllSideInfo extends RdbAllSideInfo { - public ClickhouseAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public ClickhouseAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java b/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java index 3733d7350..e6f008d63 100644 --- a/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java +++ 
b/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncReqRow; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import io.vertx.core.Vertx; @@ -37,7 +37,7 @@ public class ClickhouseAsyncReqRow extends RdbAsyncReqRow { private static final String CLICKHOUSE_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver"; - public ClickhouseAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public ClickhouseAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new ClickhouseAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncSideInfo.java b/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncSideInfo.java index 254561de0..eec5fbe74 100644 --- a/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncSideInfo.java +++ b/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncSideInfo.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -29,7 +29,7 @@ public class ClickhouseAsyncSideInfo extends RdbAsyncSideInfo { - public ClickhouseAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public ClickhouseAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/clickhouse/clickhouse-side/clickhouse-side-core/src/main/java/com/dtstack/flink/sql/side/clickhouse/table/ClickhouseSideParser.java b/clickhouse/clickhouse-side/clickhouse-side-core/src/main/java/com/dtstack/flink/sql/side/clickhouse/table/ClickhouseSideParser.java index 7be387fd8..ab285c37f 100644 --- a/clickhouse/clickhouse-side/clickhouse-side-core/src/main/java/com/dtstack/flink/sql/side/clickhouse/table/ClickhouseSideParser.java +++ b/clickhouse/clickhouse-side/clickhouse-side-core/src/main/java/com/dtstack/flink/sql/side/clickhouse/table/ClickhouseSideParser.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.side.clickhouse.table; import com.dtstack.flink.sql.side.rdb.table.RdbSideParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import ru.yandex.clickhouse.domain.ClickHouseDataType; import java.util.Map; @@ -38,8 +38,8 @@ public class ClickhouseSideParser extends RdbSideParser { private static final String CURR_TYPE = "clickhouse"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo clickhouseTableInfo = super.getTableInfo(tableName, fieldsInfo, 
props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo clickhouseTableInfo = super.getTableInfo(tableName, fieldsInfo, props); clickhouseTableInfo.setType(CURR_TYPE); return clickhouseTableInfo; } diff --git a/clickhouse/clickhouse-sink/src/main/java/com/dtstack/flink/sql/sink/clickhouse/table/ClickhouseSinkParser.java b/clickhouse/clickhouse-sink/src/main/java/com/dtstack/flink/sql/sink/clickhouse/table/ClickhouseSinkParser.java index 8c3df93d7..5b0f2598f 100644 --- a/clickhouse/clickhouse-sink/src/main/java/com/dtstack/flink/sql/sink/clickhouse/table/ClickhouseSinkParser.java +++ b/clickhouse/clickhouse-sink/src/main/java/com/dtstack/flink/sql/sink/clickhouse/table/ClickhouseSinkParser.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.sink.clickhouse.table; import com.dtstack.flink.sql.sink.rdb.table.RdbSinkParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import ru.yandex.clickhouse.domain.ClickHouseDataType; import java.util.Map; @@ -30,8 +30,8 @@ public class ClickhouseSinkParser extends RdbSinkParser { private static final String CURR_TYPE = "clickhouse"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo clickhouseTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo clickhouseTableInfo = super.getTableInfo(tableName, fieldsInfo, props); clickhouseTableInfo.setType(CURR_TYPE); return clickhouseTableInfo; } diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java index a397036ef..e99c76d03 100644 --- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java +++ b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.console; -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; +import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; import com.dtstack.flink.sql.sink.console.table.TablePrintUtil; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; @@ -37,7 +37,7 @@ * * @author xuqianjin */ -public class ConsoleOutputFormat extends DtRichOutputFormat { +public class ConsoleOutputFormat extends AbstractDtRichOutputFormat { private static final Logger LOG = LoggerFactory.getLogger(ConsoleOutputFormat.class); diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleSink.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleSink.java index 77a3efea2..6cfdd2d3d 100644 --- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleSink.java +++ b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleSink.java @@ -19,7 +19,7 @@ package com.dtstack.flink.sql.sink.console; import com.dtstack.flink.sql.sink.IStreamSinkGener; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -80,7 +80,7 @@ public void 
emitDataStream(DataStream> dataStream) { } @Override - public ConsoleSink genStreamSink(TargetTableInfo targetTableInfo) { + public ConsoleSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { return this; } } diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleSinkParser.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleSinkParser.java index e77444bfd..93ed02420 100644 --- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleSinkParser.java +++ b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleSinkParser.java @@ -18,13 +18,13 @@ package com.dtstack.flink.sql.sink.console.table; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; -import static com.dtstack.flink.sql.table.TableInfo.PARALLELISM_KEY; +import static com.dtstack.flink.sql.table.AbstractTableInfo.PARALLELISM_KEY; /** * Reason: @@ -32,9 +32,9 @@ * * @author xuqianjin */ -public class ConsoleSinkParser extends AbsTableParser { +public class ConsoleSinkParser extends AbstractTableParser { @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { ConsoleTableInfo consoleTableInfo = new ConsoleTableInfo(); consoleTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, consoleTableInfo); diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleTableInfo.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleTableInfo.java index 4b286c667..6fd7063c6 100644 --- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleTableInfo.java +++ b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/ConsoleTableInfo.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.console.table; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; /** * Reason: @@ -26,7 +26,7 @@ * * @author xuqianjin */ -public class ConsoleTableInfo extends TargetTableInfo { +public class ConsoleTableInfo extends AbstractTargetTableInfo { private static final String CURR_TYPE = "console"; diff --git a/core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java b/core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java index dcda4c093..d5b13ce12 100644 --- a/core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java +++ b/core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java @@ -65,10 +65,10 @@ public class ConfigConstrant { // restart plocy - public static final int failureRate = 3; + public static final int FAILUEE_RATE = 3; - public static final int failureInterval = 6; //min + public static final int FAILUEE_INTERVAL = 6; //min - public static final int delayInterval = 10; //sec + public static final int DELAY_INTERVAL = 10; //sec } diff --git a/core/src/main/java/com/dtstack/flink/sql/enums/ColumnType.java b/core/src/main/java/com/dtstack/flink/sql/enums/ColumnType.java index 749bbc907..7f3f0019c 100644 --- a/core/src/main/java/com/dtstack/flink/sql/enums/ColumnType.java +++ 
b/core/src/main/java/com/dtstack/flink/sql/enums/ColumnType.java
@@ -46,11 +46,66 @@
  * @author huyifan.zju@163.com
  */
 public enum ColumnType {
-    STRING, VARCHAR, CHAR,
-    INT, MEDIUMINT, TINYINT, DATETIME, SMALLINT, BIGINT,
-    DOUBLE, FLOAT,
+    /*
+     * string
+     */
+    STRING,
+    /**
+     * varchar
+     */
+    VARCHAR,
+    /**
+     * char
+     */
+    CHAR,
+    /**
+     * int
+     */
+    INT,
+    /**
+     * mediumint
+     */
+    MEDIUMINT,
+    /**
+     * tinyint
+     */
+    TINYINT,
+    /**
+     * datetime
+     */
+    DATETIME,
+    /**
+     * smallint
+     */
+    SMALLINT,
+    /**
+     * bigint
+     */
+    BIGINT,
+    /**
+     * double
+     */
+    DOUBLE,
+    /**
+     * float
+     */
+    FLOAT,
+    /**
+     * boolean
+     */
     BOOLEAN,
-    DATE, TIMESTAMP, DECIMAL;
+    /**
+     * date
+     */
+    DATE,
+    /**
+     * timestamp
+     */
+    TIMESTAMP,
+    /**
+     * decimal
+     */
+    DECIMAL;
 
     public static ColumnType fromString(String type) {
         if(type == null) {
diff --git a/core/src/main/java/com/dtstack/flink/sql/enums/ECacheContentType.java b/core/src/main/java/com/dtstack/flink/sql/enums/ECacheContentType.java
index 66160d820..49e352757 100644
--- a/core/src/main/java/com/dtstack/flink/sql/enums/ECacheContentType.java
+++ b/core/src/main/java/com/dtstack/flink/sql/enums/ECacheContentType.java
@@ -30,8 +30,17 @@
 
 public enum ECacheContentType {
 
+    /**
+     * none
+     */
     MissVal(0),
+    /**
+     * single line
+     */
     SingleLine(1),
+    /**
+     * multiple lines
+     */
     MultiLine(2);
 
     int type;
diff --git a/core/src/main/java/com/dtstack/flink/sql/enums/ECacheType.java b/core/src/main/java/com/dtstack/flink/sql/enums/ECacheType.java
index 582148c2c..9d5bb5d11 100644
--- a/core/src/main/java/com/dtstack/flink/sql/enums/ECacheType.java
+++ b/core/src/main/java/com/dtstack/flink/sql/enums/ECacheType.java
@@ -27,7 +27,18 @@
  * @author xuchao
 */
 public enum ECacheType {
-    NONE, LRU, ALL;
+    /**
+     * none
+     */
+    NONE,
+    /**
+     * lru
+     */
+    LRU,
+    /**
+     * all
+     */
+    ALL;
 
     public static boolean isValid(String type){
         for(ECacheType tmpType : ECacheType.values()){
diff --git a/core/src/main/java/com/dtstack/flink/sql/enums/EDatabaseType.java b/core/src/main/java/com/dtstack/flink/sql/enums/EDatabaseType.java
index 9b01bf052..7d2235f2e 100644
--- a/core/src/main/java/com/dtstack/flink/sql/enums/EDatabaseType.java
+++ b/core/src/main/java/com/dtstack/flink/sql/enums/EDatabaseType.java
@@ -26,8 +26,17 @@
  */
 
 public enum EDatabaseType {
 
+    /**
+     * mysql
+     */
     MYSQL,
+    /**
+     * sqlserver
+     */
     SQLSERVER,
+    /**
+     * oracle
+     */
     ORACLE,
 }
diff --git a/core/src/main/java/com/dtstack/flink/sql/enums/EPluginLoadMode.java b/core/src/main/java/com/dtstack/flink/sql/enums/EPluginLoadMode.java
index 6cb027ac3..439966dd2 100644
--- a/core/src/main/java/com/dtstack/flink/sql/enums/EPluginLoadMode.java
+++ b/core/src/main/java/com/dtstack/flink/sql/enums/EPluginLoadMode.java
@@ -26,7 +26,13 @@
  */
 public enum EPluginLoadMode {
 
+    /**
+     * 0:classpath
+     */
     CLASSPATH(0),
+    /**
+     * 1:shipfile
+     */
     SHIPFILE(1);
 
     private int type;
diff --git a/core/src/main/java/com/dtstack/flink/sql/enums/EStateBackend.java b/core/src/main/java/com/dtstack/flink/sql/enums/EStateBackend.java
index a8f926175..098cb57fe 100644
--- a/core/src/main/java/com/dtstack/flink/sql/enums/EStateBackend.java
+++ b/core/src/main/java/com/dtstack/flink/sql/enums/EStateBackend.java
@@ -25,8 +25,17 @@
  * @author maqi
 */
 public enum EStateBackend {
+    /**
+     * memory
+     */
     MEMORY,
+    /**
+     * rocksdb
+     */
     ROCKSDB,
+    /**
+     * filesystem
+     */
     FILESYSTEM;
 
     public static EStateBackend convertFromString(String type) {
diff --git a/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java
b/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java index b453c9414..9db50e65a 100644 --- a/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java +++ b/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java @@ -102,9 +102,9 @@ public static void streamExecutionEnvironmentConfig(StreamExecutionEnvironment s }); streamEnv.setRestartStrategy(RestartStrategies.failureRateRestart( - ConfigConstrant.failureRate, - Time.of(ConfigConstrant.failureInterval, TimeUnit.MINUTES), - Time.of(ConfigConstrant.delayInterval, TimeUnit.SECONDS) + ConfigConstrant.FAILUEE_RATE, + Time.of(ConfigConstrant.FAILUEE_INTERVAL, TimeUnit.MINUTES), + Time.of(ConfigConstrant.DELAY_INTERVAL, TimeUnit.SECONDS) )); // checkpoint config diff --git a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java index 2c6d74df0..ed54a9c48 100644 --- a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java +++ b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java @@ -36,12 +36,12 @@ import com.dtstack.flink.sql.parser.SqlParser; import com.dtstack.flink.sql.parser.SqlTree; import com.dtstack.flink.sql.side.SideSqlExec; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.sink.StreamSinkFactory; import com.dtstack.flink.sql.source.StreamSourceFactory; -import com.dtstack.flink.sql.table.SourceTableInfo; -import com.dtstack.flink.sql.table.TableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractSourceTableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; import com.dtstack.flink.sql.util.PluginUtil; import com.dtstack.flink.sql.watermarker.WaterMarkerAssigner; @@ -159,7 +159,7 @@ public static StreamExecutionEnvironment getStreamExecution(ParamsInfo paramsInf SqlParser.setLocalSqlPluginRoot(paramsInfo.getLocalSqlPluginPath()); SqlTree sqlTree = SqlParser.parseSql(paramsInfo.getSql()); - Map sideTableMap = Maps.newHashMap(); + Map sideTableMap = Maps.newHashMap(); Map registerTableCache = Maps.newHashMap(); //register udf @@ -193,7 +193,7 @@ public static List getExternalJarUrls(String addJarListStr) throws java.io. 
return jarUrlList; } - public static void sqlTranslation(String localSqlPluginPath, StreamTableEnvironment tableEnv, SqlTree sqlTree, Map sideTableMap, Map registerTableCache, StreamQueryConfig queryConfig) throws Exception { + public static void sqlTranslation(String localSqlPluginPath, StreamTableEnvironment tableEnv, SqlTree sqlTree, Map sideTableMap, Map registerTableCache, StreamQueryConfig queryConfig) throws Exception { SideSqlExec sideSqlExec = new SideSqlExec(); sideSqlExec.setLocalSqlPluginPath(localSqlPluginPath); for (CreateTmpTableParser.SqlParserResult result : sqlTree.getTmpSqlList()) { @@ -264,14 +264,14 @@ public static void registerUserDefinedFunction(SqlTree sqlTree, List jarUrl * @throws Exception */ public static Set registerTable(SqlTree sqlTree, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv, String localSqlPluginPath, - String remoteSqlPluginPath, String pluginLoadMode, Map sideTableMap, Map registerTableCache) throws Exception { + String remoteSqlPluginPath, String pluginLoadMode, Map sideTableMap, Map registerTableCache) throws Exception { Set pluginClassPatshSets = Sets.newHashSet(); WaterMarkerAssigner waterMarkerAssigner = new WaterMarkerAssigner(); - for (TableInfo tableInfo : sqlTree.getTableInfoMap().values()) { + for (AbstractTableInfo tableInfo : sqlTree.getTableInfoMap().values()) { - if (tableInfo instanceof SourceTableInfo) { + if (tableInfo instanceof AbstractSourceTableInfo) { - SourceTableInfo sourceTableInfo = (SourceTableInfo) tableInfo; + AbstractSourceTableInfo sourceTableInfo = (AbstractSourceTableInfo) tableInfo; Table table = StreamSourceFactory.getStreamSource(sourceTableInfo, env, tableEnv, localSqlPluginPath); tableEnv.registerTable(sourceTableInfo.getAdaptName(), table); //Note --- parameter conversion function can not be used inside a function of the type of polymerization @@ -302,21 +302,21 @@ public static Set registerTable(SqlTree sqlTree, StreamExecutionEnvironment } registerTableCache.put(tableInfo.getName(), regTable); - URL sourceTablePathUrl = PluginUtil.buildSourceAndSinkPathByLoadMode(tableInfo.getType(), SourceTableInfo.SOURCE_SUFFIX, localSqlPluginPath, remoteSqlPluginPath, pluginLoadMode); + URL sourceTablePathUrl = PluginUtil.buildSourceAndSinkPathByLoadMode(tableInfo.getType(), AbstractSourceTableInfo.SOURCE_SUFFIX, localSqlPluginPath, remoteSqlPluginPath, pluginLoadMode); pluginClassPatshSets.add(sourceTablePathUrl); - } else if (tableInfo instanceof TargetTableInfo) { + } else if (tableInfo instanceof AbstractTargetTableInfo) { - TableSink tableSink = StreamSinkFactory.getTableSink((TargetTableInfo) tableInfo, localSqlPluginPath); + TableSink tableSink = StreamSinkFactory.getTableSink((AbstractTargetTableInfo) tableInfo, localSqlPluginPath); TypeInformation[] flinkTypes = FunctionManager.transformTypes(tableInfo.getFieldClasses()); tableEnv.registerTableSink(tableInfo.getName(), tableInfo.getFields(), flinkTypes, tableSink); - URL sinkTablePathUrl = PluginUtil.buildSourceAndSinkPathByLoadMode(tableInfo.getType(), TargetTableInfo.TARGET_SUFFIX, localSqlPluginPath, remoteSqlPluginPath, pluginLoadMode); + URL sinkTablePathUrl = PluginUtil.buildSourceAndSinkPathByLoadMode(tableInfo.getType(), AbstractTargetTableInfo.TARGET_SUFFIX, localSqlPluginPath, remoteSqlPluginPath, pluginLoadMode); pluginClassPatshSets.add(sinkTablePathUrl); - } else if (tableInfo instanceof SideTableInfo) { - String sideOperator = ECacheType.ALL.name().equals(((SideTableInfo) tableInfo).getCacheType()) ? 
"all" : "async"; - sideTableMap.put(tableInfo.getName(), (SideTableInfo) tableInfo); + } else if (tableInfo instanceof AbstractSideTableInfo) { + String sideOperator = ECacheType.ALL.name().equals(((AbstractSideTableInfo) tableInfo).getCacheType()) ? "all" : "async"; + sideTableMap.put(tableInfo.getName(), (AbstractSideTableInfo) tableInfo); - URL sideTablePathUrl = PluginUtil.buildSidePathByLoadMode(tableInfo.getType(), sideOperator, SideTableInfo.TARGET_SUFFIX, localSqlPluginPath, remoteSqlPluginPath, pluginLoadMode); + URL sideTablePathUrl = PluginUtil.buildSidePathByLoadMode(tableInfo.getType(), sideOperator, AbstractSideTableInfo.TARGET_SUFFIX, localSqlPluginPath, remoteSqlPluginPath, pluginLoadMode); pluginClassPatshSets.add(sideTablePathUrl); } else { throw new RuntimeException("not support table type:" + tableInfo.getType()); diff --git a/core/src/main/java/com/dtstack/flink/sql/format/dtnest/DtNestRowDeserializationSchema.java b/core/src/main/java/com/dtstack/flink/sql/format/dtnest/DtNestRowDeserializationSchema.java index 78b579305..4c0d68eb2 100644 --- a/core/src/main/java/com/dtstack/flink/sql/format/dtnest/DtNestRowDeserializationSchema.java +++ b/core/src/main/java/com/dtstack/flink/sql/format/dtnest/DtNestRowDeserializationSchema.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.format.dtnest; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.google.common.base.Strings; import com.google.common.collect.Maps; import org.apache.flink.api.common.serialization.AbstractDeserializationSchema; @@ -57,9 +57,9 @@ public class DtNestRowDeserializationSchema extends AbstractDeserializationSchem private final String[] fieldNames; private final TypeInformation[] fieldTypes; - private List fieldExtraInfos; + private List fieldExtraInfos; - public DtNestRowDeserializationSchema(TypeInformation typeInfo, Map rowAndFieldMapping, List fieldExtraInfos) { + public DtNestRowDeserializationSchema(TypeInformation typeInfo, Map rowAndFieldMapping, List fieldExtraInfos) { this.fieldNames = ((RowTypeInfo) typeInfo).getFieldNames(); this.fieldTypes = ((RowTypeInfo) typeInfo).getFieldTypes(); this.rowAndFieldMapping = rowAndFieldMapping; @@ -75,7 +75,7 @@ public Row deserialize(byte[] message) throws IOException { try { for (int i = 0; i < fieldNames.length; i++) { JsonNode node = getIgnoreCase(fieldNames[i]); - TableInfo.FieldExtraInfo fieldExtraInfo = fieldExtraInfos.get(i); + AbstractTableInfo.FieldExtraInfo fieldExtraInfo = fieldExtraInfos.get(i); if (node == null) { if (fieldExtraInfo != null && fieldExtraInfo.getNotNull()) { diff --git a/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java b/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java index 0e8bda1fe..e49adfd93 100644 --- a/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java @@ -92,7 +92,7 @@ public Options getOptions(){ } public List getProgramExeArgList() throws Exception { - Map mapConf = PluginUtil.ObjectToMap(properties); + Map mapConf = PluginUtil.objectToMap(properties); List args = Lists.newArrayList(); for(Map.Entry one : mapConf.entrySet()){ String key = one.getKey(); diff --git a/core/src/main/java/com/dtstack/flink/sql/outputformat/DtRichOutputFormat.java b/core/src/main/java/com/dtstack/flink/sql/outputformat/AbstractDtRichOutputFormat.java similarity index 95% rename from core/src/main/java/com/dtstack/flink/sql/outputformat/DtRichOutputFormat.java 
rename to core/src/main/java/com/dtstack/flink/sql/outputformat/AbstractDtRichOutputFormat.java index 1fc40c13b..fbcc86bbd 100644 --- a/core/src/main/java/com/dtstack/flink/sql/outputformat/DtRichOutputFormat.java +++ b/core/src/main/java/com/dtstack/flink/sql/outputformat/AbstractDtRichOutputFormat.java @@ -27,7 +27,7 @@ * extend RichOutputFormat with metric 'dtNumRecordsOut', 'dtNumDirtyRecordsOut', 'dtNumRecordsOutRate' * Created by sishu.yss on 2018/11/28. */ -public abstract class DtRichOutputFormat extends RichOutputFormat{ +public abstract class AbstractDtRichOutputFormat extends RichOutputFormat{ public transient Counter outRecords; public transient Counter outDirtyRecords; diff --git a/core/src/main/java/com/dtstack/flink/sql/parser/SqlParser.java b/core/src/main/java/com/dtstack/flink/sql/parser/SqlParser.java index a76c1b31a..2afc76c48 100644 --- a/core/src/main/java/com/dtstack/flink/sql/parser/SqlParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/parser/SqlParser.java @@ -21,8 +21,8 @@ package com.dtstack.flink.sql.parser; import com.dtstack.flink.sql.enums.ETableType; -import com.dtstack.flink.sql.table.TableInfo; -import com.dtstack.flink.sql.table.TableInfoParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfoParser; import com.dtstack.flink.sql.util.DtStringUtil; import org.apache.commons.lang3.StringUtils; import com.google.common.collect.Lists; @@ -75,7 +75,7 @@ public static SqlTree parseSql(String sql) throws Exception { List sqlArr = DtStringUtil.splitIgnoreQuota(sql, SQL_DELIMITER); SqlTree sqlTree = new SqlTree(); - TableInfoParser tableInfoParser = new TableInfoParser(); + AbstractTableInfoParser tableInfoParser = new AbstractTableInfoParser(); for(String childSql : sqlArr){ if(Strings.isNullOrEmpty(childSql)){ continue; @@ -113,7 +113,7 @@ public static SqlTree parseSql(String sql) throws Exception { throw new RuntimeException("can't find table " + tableName); } - TableInfo tableInfo = tableInfoParser.parseWithTableType(ETableType.SOURCE.getType(), + AbstractTableInfo tableInfo = tableInfoParser.parseWithTableType(ETableType.SOURCE.getType(), createTableResult, LOCAL_SQL_PLUGIN_ROOT); sqlTree.addTableInfo(tableName, tableInfo); } @@ -126,7 +126,7 @@ public static SqlTree parseSql(String sql) throws Exception { throw new RuntimeException("can't find table " + tableName); } - TableInfo tableInfo = tableInfoParser.parseWithTableType(ETableType.SINK.getType(), + AbstractTableInfo tableInfo = tableInfoParser.parseWithTableType(ETableType.SINK.getType(), createTableResult, LOCAL_SQL_PLUGIN_ROOT); sqlTree.addTableInfo(tableName, tableInfo); } @@ -144,7 +144,7 @@ public static SqlTree parseSql(String sql) throws Exception { throw new RuntimeException("can't find table " + tableName); } } else { - TableInfo tableInfo = tableInfoParser.parseWithTableType(ETableType.SOURCE.getType(), + AbstractTableInfo tableInfo = tableInfoParser.parseWithTableType(ETableType.SOURCE.getType(), createTableResult, LOCAL_SQL_PLUGIN_ROOT); sqlTree.addTableInfo(tableName, tableInfo); } diff --git a/core/src/main/java/com/dtstack/flink/sql/parser/SqlTree.java b/core/src/main/java/com/dtstack/flink/sql/parser/SqlTree.java index 1b64b7c68..5252ee022 100644 --- a/core/src/main/java/com/dtstack/flink/sql/parser/SqlTree.java +++ b/core/src/main/java/com/dtstack/flink/sql/parser/SqlTree.java @@ -21,7 +21,7 @@ package com.dtstack.flink.sql.parser; -import com.dtstack.flink.sql.table.TableInfo; +import 
com.dtstack.flink.sql.table.AbstractTableInfo; import com.google.common.collect.Maps; import com.google.common.collect.Lists; @@ -41,7 +41,7 @@ public class SqlTree { private Map preDealTableMap = Maps.newHashMap(); - private Map tableInfoMap = Maps.newLinkedHashMap(); + private Map tableInfoMap = Maps.newLinkedHashMap(); private List execSqlList = Lists.newArrayList(); @@ -89,11 +89,11 @@ public List getTmpSqlList(){ return tmpSqlList; } - public Map getTableInfoMap() { + public Map getTableInfoMap() { return tableInfoMap; } - public void addTableInfo(String tableName, TableInfo tableInfo){ + public void addTableInfo(String tableName, AbstractTableInfo tableInfo){ tableInfoMap.put(tableName, tableInfo); } } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java b/core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java similarity index 96% rename from core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java rename to core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java index 8562d9859..6de2354a3 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SideTableInfo.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.side; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.google.common.collect.Lists; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -35,7 +35,7 @@ * @author xuchao */ -public abstract class SideTableInfo extends TableInfo implements Serializable { +public abstract class AbstractSideTableInfo extends AbstractTableInfo implements Serializable { public static final String TARGET_SUFFIX = "Side"; diff --git a/core/src/main/java/com/dtstack/flink/sql/side/AllReqRow.java b/core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java similarity index 88% rename from core/src/main/java/com/dtstack/flink/sql/side/AllReqRow.java rename to core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java index 7d16ee726..8a6851add 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/AllReqRow.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java @@ -37,13 +37,13 @@ * @author xuchao */ -public abstract class AllReqRow extends RichFlatMapFunction implements ISideReqRow { +public abstract class BaseAllReqRow extends RichFlatMapFunction implements ISideReqRow { - protected SideInfo sideInfo; + protected BaseSideInfo sideInfo; private ScheduledExecutorService es; - public AllReqRow(SideInfo sideInfo){ + public BaseAllReqRow(BaseSideInfo sideInfo){ this.sideInfo = sideInfo; } @@ -59,7 +59,7 @@ public void open(Configuration parameters) throws Exception { System.out.println("----- all cacheRef init end-----"); //start reload cache thread - SideTableInfo sideTableInfo = sideInfo.getSideTableInfo(); + AbstractSideTableInfo sideTableInfo = sideInfo.getSideTableInfo(); es = Executors.newSingleThreadScheduledExecutor(new DTThreadFactory("cache-all-reload")); es.scheduleAtFixedRate(() -> reloadCache(), sideTableInfo.getCacheTimeout(), sideTableInfo.getCacheTimeout(), TimeUnit.MILLISECONDS); } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java b/core/src/main/java/com/dtstack/flink/sql/side/BaseAsyncReqRow.java similarity index 91% rename from core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java rename to 
core/src/main/java/com/dtstack/flink/sql/side/BaseAsyncReqRow.java index 0415102ab..ff08603cb 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/AsyncReqRow.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/BaseAsyncReqRow.java @@ -22,7 +22,7 @@ import com.dtstack.flink.sql.enums.ECacheType; import com.dtstack.flink.sql.metric.MetricConstant; -import com.dtstack.flink.sql.side.cache.AbsSideCache; +import com.dtstack.flink.sql.side.cache.AbstractSideCache; import com.dtstack.flink.sql.side.cache.CacheObj; import com.dtstack.flink.sql.side.cache.LRUSideCache; import org.apache.calcite.sql.JoinType; @@ -47,14 +47,14 @@ * @author xuchao */ -public abstract class AsyncReqRow extends RichAsyncFunction implements ISideReqRow { - private static final Logger LOG = LoggerFactory.getLogger(AsyncReqRow.class); +public abstract class BaseAsyncReqRow extends RichAsyncFunction implements ISideReqRow { + private static final Logger LOG = LoggerFactory.getLogger(BaseAsyncReqRow.class); private static final long serialVersionUID = 2098635244857937717L; - protected SideInfo sideInfo; + protected BaseSideInfo sideInfo; protected transient Counter parseErrorRecords; - public AsyncReqRow(SideInfo sideInfo){ + public BaseAsyncReqRow(BaseSideInfo sideInfo){ this.sideInfo = sideInfo; } @@ -66,12 +66,12 @@ public void open(Configuration parameters) throws Exception { } private void initCache(){ - SideTableInfo sideTableInfo = sideInfo.getSideTableInfo(); + AbstractSideTableInfo sideTableInfo = sideInfo.getSideTableInfo(); if(sideTableInfo.getCacheType() == null || ECacheType.NONE.name().equalsIgnoreCase(sideTableInfo.getCacheType())){ return; } - AbsSideCache sideCache; + AbstractSideCache sideCache; if(ECacheType.LRU.name().equalsIgnoreCase(sideTableInfo.getCacheType())){ sideCache = new LRUSideCache(sideTableInfo); sideInfo.setSideCache(sideCache); diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideInfo.java b/core/src/main/java/com/dtstack/flink/sql/side/BaseSideInfo.java similarity index 91% rename from core/src/main/java/com/dtstack/flink/sql/side/SideInfo.java rename to core/src/main/java/com/dtstack/flink/sql/side/BaseSideInfo.java index df41e1663..9c6e1c575 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SideInfo.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/BaseSideInfo.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.side; -import com.dtstack.flink.sql.side.cache.AbsSideCache; +import com.dtstack.flink.sql.side.cache.AbstractSideCache; import org.apache.calcite.sql.JoinType; import org.apache.calcite.sql.SqlBasicCall; import org.apache.calcite.sql.SqlIdentifier; @@ -41,7 +41,7 @@ * @author xuchao */ -public abstract class SideInfo implements Serializable{ +public abstract class BaseSideInfo implements Serializable{ protected RowTypeInfo rowTypeInfo; @@ -66,12 +66,12 @@ public abstract class SideInfo implements Serializable{ //key:Returns the value of the position, value: the ref field name​in the side table protected Map sideFieldNameIndex = Maps.newHashMap(); - protected SideTableInfo sideTableInfo; + protected AbstractSideTableInfo sideTableInfo; - protected AbsSideCache sideCache; + protected AbstractSideCache sideCache; - public SideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, - SideTableInfo sideTableInfo){ + public BaseSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, + AbstractSideTableInfo sideTableInfo){ this.rowTypeInfo = rowTypeInfo; this.outFieldInfoList = outFieldInfoList; this.joinType = 
joinInfo.getJoinType(); @@ -158,7 +158,7 @@ public void dealOneEqualCon(SqlNode sqlNode, String sideTableName){ } } - public abstract void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo); + public abstract void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo); public RowTypeInfo getRowTypeInfo() { return rowTypeInfo; @@ -232,19 +232,19 @@ public void setSideFieldIndex(Map sideFieldIndex) { this.sideFieldIndex = sideFieldIndex; } - public SideTableInfo getSideTableInfo() { + public AbstractSideTableInfo getSideTableInfo() { return sideTableInfo; } - public void setSideTableInfo(SideTableInfo sideTableInfo) { + public void setSideTableInfo(AbstractSideTableInfo sideTableInfo) { this.sideTableInfo = sideTableInfo; } - public AbsSideCache getSideCache() { + public AbstractSideCache getSideCache() { return sideCache; } - public void setSideCache(AbsSideCache sideCache) { + public void setSideCache(AbstractSideCache sideCache) { this.sideCache = sideCache; } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SidePredicatesParser.java b/core/src/main/java/com/dtstack/flink/sql/side/SidePredicatesParser.java index 64f9e646b..02b9c97f8 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SidePredicatesParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/SidePredicatesParser.java @@ -34,7 +34,6 @@ import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import static org.apache.calcite.sql.SqlKind.*; @@ -46,7 +45,7 @@ * @author maqi */ public class SidePredicatesParser { - public void fillPredicatesForSideTable(String exeSql, Map sideTableMap) throws SqlParseException { + public void fillPredicatesForSideTable(String exeSql, Map sideTableMap) throws SqlParseException { SqlParser sqlParser = SqlParser.create(exeSql, CalciteConfig.MYSQL_LEX_CONFIG); SqlNode sqlNode = sqlParser.parseStmt(); parseSql(sqlNode, sideTableMap, Maps.newHashMap()); @@ -58,7 +57,7 @@ public void fillPredicatesForSideTable(String exeSql, Map * @param sideTableMap * @param tabMapping 谓词属性中别名对应的真实维表名称 */ - private void parseSql(SqlNode sqlNode, Map sideTableMap, Map tabMapping) { + private void parseSql(SqlNode sqlNode, Map sideTableMap, Map tabMapping) { SqlKind sqlKind = sqlNode.getKind(); switch (sqlKind) { case INSERT: @@ -106,7 +105,7 @@ private void parseSql(SqlNode sqlNode, Map sideTableMap, } } - private void fillToSideTableInfo(Map sideTableMap, Map tabMapping, List predicateInfoList) { + private void fillToSideTableInfo(Map sideTableMap, Map tabMapping, List predicateInfoList) { predicateInfoList.stream().filter(info -> sideTableMap.containsKey(tabMapping.getOrDefault(info.getOwnerTable(), info.getOwnerTable()))) .map(info -> sideTableMap.get(tabMapping.getOrDefault(info.getOwnerTable(), info.getOwnerTable())).getPredicateInfoes().add(info)) .count(); diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java b/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java index 1689949ca..885fa422c 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java @@ -96,7 +96,7 @@ public class SideSqlExec { private Map localTableCache = Maps.newHashMap(); private StreamTableEnvironment tableEnv ; - public void exec(String sql, Map sideTableMap, StreamTableEnvironment tableEnv, + public void exec(String sql, Map sideTableMap, StreamTableEnvironment tableEnv, Map tableCache, StreamQueryConfig queryConfig) throws Exception { 
if(localSqlPluginPath == null){ throw new RuntimeException("need to set localSqlPluginPath"); @@ -657,7 +657,7 @@ private SqlNode replaceSelectFieldName(SqlNode selectNode, FieldReplaceInfo repl * * @return */ - private boolean checkJoinCondition(SqlNode conditionNode, String sideTableAlias, SideTableInfo sideTableInfo) { + private boolean checkJoinCondition(SqlNode conditionNode, String sideTableAlias, AbstractSideTableInfo sideTableInfo) { List conditionFields = getConditionFields(conditionNode, sideTableAlias, sideTableInfo); if(CollectionUtils.isEqualCollection(conditionFields, convertPrimaryAlias(sideTableInfo))){ return true; @@ -665,7 +665,7 @@ private boolean checkJoinCondition(SqlNode conditionNode, String sideTableAlias, return false; } - private List convertPrimaryAlias(SideTableInfo sideTableInfo) { + private List convertPrimaryAlias(AbstractSideTableInfo sideTableInfo) { List res = Lists.newArrayList(); sideTableInfo.getPrimaryKeys().forEach(field -> { res.add(sideTableInfo.getPhysicalFields().getOrDefault(field, field)); @@ -673,7 +673,7 @@ private List convertPrimaryAlias(SideTableInfo sideTableInfo) { return res; } - public List getConditionFields(SqlNode conditionNode, String specifyTableName, SideTableInfo sideTableInfo){ + public List getConditionFields(SqlNode conditionNode, String specifyTableName, AbstractSideTableInfo sideTableInfo){ List sqlNodeList = Lists.newArrayList(); ParseUtils.parseAnd(conditionNode, sqlNodeList); List conditionFields = Lists.newArrayList(); @@ -704,7 +704,7 @@ public List getConditionFields(SqlNode conditionNode, String specifyTabl } public void registerTmpTable(CreateTmpTableParser.SqlParserResult result, - Map sideTableMap, StreamTableEnvironment tableEnv, + Map sideTableMap, StreamTableEnvironment tableEnv, Map tableCache) throws Exception { @@ -778,7 +778,7 @@ protected void dealAsSourceTable(StreamTableEnvironment tableEnv, SqlNode pollSq } private void joinFun(Object pollObj, Map localTableCache, - Map sideTableMap, StreamTableEnvironment tableEnv, + Map sideTableMap, StreamTableEnvironment tableEnv, List replaceInfoList) throws Exception{ JoinInfo joinInfo = (JoinInfo) pollObj; @@ -799,7 +799,7 @@ private void joinFun(Object pollObj, Map localTableCache, JoinScope.ScopeChild rightScopeChild = new JoinScope.ScopeChild(); rightScopeChild.setAlias(joinInfo.getRightTableAlias()); rightScopeChild.setTableName(joinInfo.getRightTableName()); - SideTableInfo sideTableInfo = sideTableMap.get(joinInfo.getRightTableName()); + AbstractSideTableInfo sideTableInfo = sideTableMap.get(joinInfo.getRightTableName()); if(sideTableInfo == null){ sideTableInfo = sideTableMap.get(joinInfo.getRightTableAlias()); } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/StreamSideFactory.java b/core/src/main/java/com/dtstack/flink/sql/side/StreamSideFactory.java index 8417c4519..583e4597b 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/StreamSideFactory.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/StreamSideFactory.java @@ -21,8 +21,8 @@ import com.dtstack.flink.sql.classloader.ClassLoaderManager; import com.dtstack.flink.sql.enums.ECacheType; -import com.dtstack.flink.sql.table.AbsSideTableParser; -import com.dtstack.flink.sql.table.AbsTableParser; +import com.dtstack.flink.sql.table.AbstractSideTableParser; +import com.dtstack.flink.sql.table.AbstractTableParser; import com.dtstack.flink.sql.util.PluginUtil; /** @@ -37,7 +37,7 @@ public class StreamSideFactory { private static final String CURR_TYPE = "side"; - public static 
AbsTableParser getSqlParser(String pluginType, String sqlRootDir, String cacheType) throws Exception { + public static AbstractTableParser getSqlParser(String pluginType, String sqlRootDir, String cacheType) throws Exception { String sideOperator = ECacheType.ALL.name().equalsIgnoreCase(cacheType) ? "all" : "async"; String pluginJarPath = PluginUtil.getSideJarFileDirPath(pluginType, sideOperator, "side", sqlRootDir); @@ -45,10 +45,10 @@ public static AbsTableParser getSqlParser(String pluginType, String sqlRootDir, return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> { Class sideParser = cl.loadClass(className); - if (!AbsSideTableParser.class.isAssignableFrom(sideParser)) { + if (!AbstractSideTableParser.class.isAssignableFrom(sideParser)) { throw new RuntimeException("class " + sideParser.getName() + " not subClass of AbsSideTableParser"); } - return sideParser.asSubclass(AbsTableParser.class).newInstance(); + return sideParser.asSubclass(AbstractTableParser.class).newInstance(); }); } } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/cache/AbsSideCache.java b/core/src/main/java/com/dtstack/flink/sql/side/cache/AbstractSideCache.java similarity index 84% rename from core/src/main/java/com/dtstack/flink/sql/side/cache/AbsSideCache.java rename to core/src/main/java/com/dtstack/flink/sql/side/cache/AbstractSideCache.java index 757f91600..58832c583 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/cache/AbsSideCache.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/cache/AbstractSideCache.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.side.cache; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; /** * Reason: @@ -30,11 +30,11 @@ * @author xuchao */ -public abstract class AbsSideCache { +public abstract class AbstractSideCache { - protected SideTableInfo sideTableInfo; + protected AbstractSideTableInfo sideTableInfo; - public AbsSideCache(SideTableInfo sideTableInfo){ + public AbstractSideCache(AbstractSideTableInfo sideTableInfo){ this.sideTableInfo = sideTableInfo; } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/cache/LRUSideCache.java b/core/src/main/java/com/dtstack/flink/sql/side/cache/LRUSideCache.java index 700e13bb2..2664efd34 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/cache/LRUSideCache.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/cache/LRUSideCache.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.side.cache; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; @@ -33,11 +33,11 @@ * @author xuchao */ -public class LRUSideCache extends AbsSideCache{ +public class LRUSideCache extends AbstractSideCache { protected transient Cache cache; - public LRUSideCache(SideTableInfo sideTableInfo) { + public LRUSideCache(AbstractSideTableInfo sideTableInfo) { super(sideTableInfo); } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/operator/SideAsyncOperator.java b/core/src/main/java/com/dtstack/flink/sql/side/operator/SideAsyncOperator.java index 290804200..3c2010907 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/operator/SideAsyncOperator.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/operator/SideAsyncOperator.java @@ -20,10 +20,10 @@ package com.dtstack.flink.sql.side.operator; import com.dtstack.flink.sql.classloader.ClassLoaderManager; -import com.dtstack.flink.sql.side.AsyncReqRow; 
+import com.dtstack.flink.sql.side.BaseAsyncReqRow; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.util.PluginUtil; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.streaming.api.datastream.AsyncDataStream; @@ -49,20 +49,20 @@ public class SideAsyncOperator { private static final String ORDERED = "ordered"; - private static AsyncReqRow loadAsyncReq(String sideType, String sqlRootDir, RowTypeInfo rowTypeInfo, - JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) throws Exception { + private static BaseAsyncReqRow loadAsyncReq(String sideType, String sqlRootDir, RowTypeInfo rowTypeInfo, + JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) throws Exception { String pathOfType = String.format(PATH_FORMAT, sideType); String pluginJarPath = PluginUtil.getJarFileDirPath(pathOfType, sqlRootDir); String className = PluginUtil.getSqlSideClassName(sideType, "side", OPERATOR_TYPE); return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> - cl.loadClass(className).asSubclass(AsyncReqRow.class) - .getConstructor(RowTypeInfo.class, JoinInfo.class, List.class, SideTableInfo.class) + cl.loadClass(className).asSubclass(BaseAsyncReqRow.class) + .getConstructor(RowTypeInfo.class, JoinInfo.class, List.class, AbstractSideTableInfo.class) .newInstance(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } public static DataStream getSideJoinDataStream(DataStream inputStream, String sideType, String sqlRootDir, RowTypeInfo rowTypeInfo, JoinInfo joinInfo, - List outFieldInfoList, SideTableInfo sideTableInfo) throws Exception { - AsyncReqRow asyncDbReq = loadAsyncReq(sideType, sqlRootDir, rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); + List outFieldInfoList, AbstractSideTableInfo sideTableInfo) throws Exception { + BaseAsyncReqRow asyncDbReq = loadAsyncReq(sideType, sqlRootDir, rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); //TODO How much should be set for the degree of parallelism? Timeout? capacity settings? 
if (ORDERED.equals(sideTableInfo.getCacheMode())){ diff --git a/core/src/main/java/com/dtstack/flink/sql/side/operator/SideWithAllCacheOperator.java b/core/src/main/java/com/dtstack/flink/sql/side/operator/SideWithAllCacheOperator.java index 5aa810b0f..6b6f9fe1b 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/operator/SideWithAllCacheOperator.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/operator/SideWithAllCacheOperator.java @@ -20,10 +20,10 @@ package com.dtstack.flink.sql.side.operator; import com.dtstack.flink.sql.classloader.ClassLoaderManager; -import com.dtstack.flink.sql.side.AllReqRow; +import com.dtstack.flink.sql.side.BaseAllReqRow; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.util.PluginUtil; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.streaming.api.datastream.DataStream; @@ -44,22 +44,22 @@ public class SideWithAllCacheOperator { private static final String OPERATOR_TYPE = "All"; - private static AllReqRow loadFlatMap(String sideType, String sqlRootDir, RowTypeInfo rowTypeInfo, - JoinInfo joinInfo, List outFieldInfoList, - SideTableInfo sideTableInfo) throws Exception { + private static BaseAllReqRow loadFlatMap(String sideType, String sqlRootDir, RowTypeInfo rowTypeInfo, + JoinInfo joinInfo, List outFieldInfoList, + AbstractSideTableInfo sideTableInfo) throws Exception { String pathOfType = String.format(PATH_FORMAT, sideType); String pluginJarPath = PluginUtil.getJarFileDirPath(pathOfType, sqlRootDir); String className = PluginUtil.getSqlSideClassName(sideType, "side", OPERATOR_TYPE); - return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> cl.loadClass(className).asSubclass(AllReqRow.class) - .getConstructor(RowTypeInfo.class, JoinInfo.class, List.class, SideTableInfo.class) + return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> cl.loadClass(className).asSubclass(BaseAllReqRow.class) + .getConstructor(RowTypeInfo.class, JoinInfo.class, List.class, AbstractSideTableInfo.class) .newInstance(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } public static DataStream getSideJoinDataStream(DataStream inputStream, String sideType, String sqlRootDir, RowTypeInfo rowTypeInfo, JoinInfo joinInfo, - List outFieldInfoList, SideTableInfo sideTableInfo) throws Exception { - AllReqRow allReqRow = loadFlatMap(sideType, sqlRootDir, rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); + List outFieldInfoList, AbstractSideTableInfo sideTableInfo) throws Exception { + BaseAllReqRow allReqRow = loadFlatMap(sideType, sqlRootDir, rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); return inputStream.flatMap(allReqRow); } } diff --git a/core/src/main/java/com/dtstack/flink/sql/sink/IStreamSinkGener.java b/core/src/main/java/com/dtstack/flink/sql/sink/IStreamSinkGener.java index 3cfd48f1b..e29421369 100644 --- a/core/src/main/java/com/dtstack/flink/sql/sink/IStreamSinkGener.java +++ b/core/src/main/java/com/dtstack/flink/sql/sink/IStreamSinkGener.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.sink; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; /** * Reason: @@ -30,5 +30,5 @@ */ public interface IStreamSinkGener { - T genStreamSink(TargetTableInfo targetTableInfo); + T genStreamSink(AbstractTargetTableInfo targetTableInfo); } diff --git 
a/core/src/main/java/com/dtstack/flink/sql/sink/StreamSinkFactory.java b/core/src/main/java/com/dtstack/flink/sql/sink/StreamSinkFactory.java index 53460081d..89061db3a 100644 --- a/core/src/main/java/com/dtstack/flink/sql/sink/StreamSinkFactory.java +++ b/core/src/main/java/com/dtstack/flink/sql/sink/StreamSinkFactory.java @@ -21,8 +21,8 @@ package com.dtstack.flink.sql.sink; import com.dtstack.flink.sql.classloader.ClassLoaderManager; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; import com.dtstack.flink.sql.util.PluginUtil; import org.apache.flink.table.sinks.TableSink; @@ -40,21 +40,21 @@ public class StreamSinkFactory { private static final String DIR_NAME_FORMAT = "%ssink"; - public static AbsTableParser getSqlParser(String pluginType, String sqlRootDir) throws Exception { + public static AbstractTableParser getSqlParser(String pluginType, String sqlRootDir) throws Exception { String pluginJarPath = PluginUtil.getJarFileDirPath(String.format(DIR_NAME_FORMAT, pluginType), sqlRootDir); String typeNoVersion = DtStringUtil.getPluginTypeWithoutVersion(pluginType); String className = PluginUtil.getSqlParserClassName(typeNoVersion, CURR_TYPE); return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> { Class targetParser = cl.loadClass(className); - if(!AbsTableParser.class.isAssignableFrom(targetParser)){ + if(!AbstractTableParser.class.isAssignableFrom(targetParser)){ throw new RuntimeException("class " + targetParser.getName() + " not subClass of AbsTableParser"); } - return targetParser.asSubclass(AbsTableParser.class).newInstance(); + return targetParser.asSubclass(AbstractTableParser.class).newInstance(); }); } - public static TableSink getTableSink(TargetTableInfo targetTableInfo, String localSqlRootDir) throws Exception { + public static TableSink getTableSink(AbstractTargetTableInfo targetTableInfo, String localSqlRootDir) throws Exception { String pluginType = targetTableInfo.getType(); String pluginJarDirPath = PluginUtil.getJarFileDirPath(String.format(DIR_NAME_FORMAT, pluginType), localSqlRootDir); String typeNoVersion = DtStringUtil.getPluginTypeWithoutVersion(pluginType); diff --git a/core/src/main/java/com/dtstack/flink/sql/source/IStreamSourceGener.java b/core/src/main/java/com/dtstack/flink/sql/source/IStreamSourceGener.java index b8b8f6edc..fde5a235d 100644 --- a/core/src/main/java/com/dtstack/flink/sql/source/IStreamSourceGener.java +++ b/core/src/main/java/com/dtstack/flink/sql/source/IStreamSourceGener.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.source; -import com.dtstack.flink.sql.table.SourceTableInfo; +import com.dtstack.flink.sql.table.AbstractSourceTableInfo; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.table.api.java.StreamTableEnvironment; @@ -38,6 +38,6 @@ public interface IStreamSourceGener { * @param tableEnv * @return */ - T genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv); + T genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv); } diff --git a/core/src/main/java/com/dtstack/flink/sql/source/StreamSourceFactory.java b/core/src/main/java/com/dtstack/flink/sql/source/StreamSourceFactory.java index 1057fb0ed..e0cec1415 100644 --- 
a/core/src/main/java/com/dtstack/flink/sql/source/StreamSourceFactory.java +++ b/core/src/main/java/com/dtstack/flink/sql/source/StreamSourceFactory.java @@ -22,8 +22,8 @@ import com.dtstack.flink.sql.classloader.ClassLoaderManager; -import com.dtstack.flink.sql.table.AbsSourceParser; -import com.dtstack.flink.sql.table.SourceTableInfo; +import com.dtstack.flink.sql.table.AbstractSourceParser; +import com.dtstack.flink.sql.table.AbstractSourceTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; import com.dtstack.flink.sql.util.PluginUtil; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; @@ -43,17 +43,17 @@ public class StreamSourceFactory { private static final String DIR_NAME_FORMAT = "%ssource"; - public static AbsSourceParser getSqlParser(String pluginType, String sqlRootDir) throws Exception { + public static AbstractSourceParser getSqlParser(String pluginType, String sqlRootDir) throws Exception { String pluginJarPath = PluginUtil.getJarFileDirPath(String.format(DIR_NAME_FORMAT, pluginType), sqlRootDir); String typeNoVersion = DtStringUtil.getPluginTypeWithoutVersion(pluginType); String className = PluginUtil.getSqlParserClassName(typeNoVersion, CURR_TYPE); return ClassLoaderManager.newInstance(pluginJarPath, (cl) -> { Class sourceParser = cl.loadClass(className); - if(!AbsSourceParser.class.isAssignableFrom(sourceParser)){ + if(!AbstractSourceParser.class.isAssignableFrom(sourceParser)){ throw new RuntimeException("class " + sourceParser.getName() + " not subClass of AbsSourceParser"); } - return sourceParser.asSubclass(AbsSourceParser.class).newInstance(); + return sourceParser.asSubclass(AbstractSourceParser.class).newInstance(); }); } @@ -62,7 +62,7 @@ public static AbsSourceParser getSqlParser(String pluginType, String sqlRootDir) * @param sourceTableInfo * @return */ - public static Table getStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, + public static Table getStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv, String sqlRootDir) throws Exception { String sourceTypeStr = sourceTableInfo.getType(); diff --git a/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java similarity index 71% rename from core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java rename to core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java index 49c5629bc..8da46b079 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/AbsSideTableParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java @@ -21,7 +21,7 @@ package com.dtstack.flink.sql.table; import com.dtstack.flink.sql.enums.ECacheType; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; import java.util.regex.Matcher; @@ -34,24 +34,24 @@ * @author xuchao */ -public abstract class AbsSideTableParser extends AbsTableParser { +public abstract class AbstractSideTableParser extends AbstractTableParser { private final static String SIDE_SIGN_KEY = "sideSignKey"; private final static Pattern SIDE_TABLE_SIGN = Pattern.compile("(?i)^PERIOD\\s+FOR\\s+SYSTEM_TIME$"); - public AbsSideTableParser() { + public AbstractSideTableParser() { addParserHandler(SIDE_SIGN_KEY, SIDE_TABLE_SIGN, this::dealSideSign); } - private void dealSideSign(Matcher 
matcher, TableInfo tableInfo){ + private void dealSideSign(Matcher matcher, AbstractTableInfo tableInfo){ //FIXME SIDE_TABLE_SIGN current just used as a sign for side table; and do nothing } //Analytical create table attributes ==> Get information cache - protected void parseCacheProp(SideTableInfo sideTableInfo, Map props){ - if(props.containsKey(SideTableInfo.CACHE_KEY.toLowerCase())){ - String cacheType = MathUtil.getString(props.get(SideTableInfo.CACHE_KEY.toLowerCase())); + protected void parseCacheProp(AbstractSideTableInfo sideTableInfo, Map props){ + if(props.containsKey(AbstractSideTableInfo.CACHE_KEY.toLowerCase())){ + String cacheType = MathUtil.getString(props.get(AbstractSideTableInfo.CACHE_KEY.toLowerCase())); if(cacheType == null){ return; } @@ -61,31 +61,31 @@ protected void parseCacheProp(SideTableInfo sideTableInfo, Map p } sideTableInfo.setCacheType(cacheType); - if(props.containsKey(SideTableInfo.CACHE_SIZE_KEY.toLowerCase())){ - Integer cacheSize = MathUtil.getIntegerVal(props.get(SideTableInfo.CACHE_SIZE_KEY.toLowerCase())); + if(props.containsKey(AbstractSideTableInfo.CACHE_SIZE_KEY.toLowerCase())){ + Integer cacheSize = MathUtil.getIntegerVal(props.get(AbstractSideTableInfo.CACHE_SIZE_KEY.toLowerCase())); if(cacheSize < 0){ throw new RuntimeException("cache size need > 0."); } sideTableInfo.setCacheSize(cacheSize); } - if(props.containsKey(SideTableInfo.CACHE_TTLMS_KEY.toLowerCase())){ - Long cacheTTLMS = MathUtil.getLongVal(props.get(SideTableInfo.CACHE_TTLMS_KEY.toLowerCase())); + if(props.containsKey(AbstractSideTableInfo.CACHE_TTLMS_KEY.toLowerCase())){ + Long cacheTTLMS = MathUtil.getLongVal(props.get(AbstractSideTableInfo.CACHE_TTLMS_KEY.toLowerCase())); if(cacheTTLMS < 1000){ throw new RuntimeException("cache time out need > 1000 ms."); } sideTableInfo.setCacheTimeout(cacheTTLMS); } - if(props.containsKey(SideTableInfo.PARTITIONED_JOIN_KEY.toLowerCase())){ - Boolean partitionedJoinKey = MathUtil.getBoolean(props.get(SideTableInfo.PARTITIONED_JOIN_KEY.toLowerCase())); + if(props.containsKey(AbstractSideTableInfo.PARTITIONED_JOIN_KEY.toLowerCase())){ + Boolean partitionedJoinKey = MathUtil.getBoolean(props.get(AbstractSideTableInfo.PARTITIONED_JOIN_KEY.toLowerCase())); if(partitionedJoinKey){ sideTableInfo.setPartitionedJoin(true); } } - if(props.containsKey(SideTableInfo.CACHE_MODE_KEY.toLowerCase())){ - String cachemode = MathUtil.getString(props.get(SideTableInfo.CACHE_MODE_KEY.toLowerCase())); + if(props.containsKey(AbstractSideTableInfo.CACHE_MODE_KEY.toLowerCase())){ + String cachemode = MathUtil.getString(props.get(AbstractSideTableInfo.CACHE_MODE_KEY.toLowerCase())); if(!"ordered".equalsIgnoreCase(cachemode) && !"unordered".equalsIgnoreCase(cachemode)){ throw new RuntimeException("cachemode must ordered or unordered!"); @@ -93,16 +93,16 @@ protected void parseCacheProp(SideTableInfo sideTableInfo, Map p sideTableInfo.setCacheMode(cachemode.toLowerCase()); } - if(props.containsKey(SideTableInfo.ASYNC_CAP_KEY.toLowerCase())){ - Integer asyncCap = MathUtil.getIntegerVal(props.get(SideTableInfo.ASYNC_CAP_KEY.toLowerCase())); + if(props.containsKey(AbstractSideTableInfo.ASYNC_CAP_KEY.toLowerCase())){ + Integer asyncCap = MathUtil.getIntegerVal(props.get(AbstractSideTableInfo.ASYNC_CAP_KEY.toLowerCase())); if(asyncCap < 0){ throw new RuntimeException("asyncCapacity size need > 0."); } sideTableInfo.setAsyncCapacity(asyncCap); } - if(props.containsKey(SideTableInfo.ASYNC_TIMEOUT_KEY.toLowerCase())){ - Integer asyncTimeout = 
MathUtil.getIntegerVal(props.get(SideTableInfo.ASYNC_TIMEOUT_KEY.toLowerCase())); + if(props.containsKey(AbstractSideTableInfo.ASYNC_TIMEOUT_KEY.toLowerCase())){ + Integer asyncTimeout = MathUtil.getIntegerVal(props.get(AbstractSideTableInfo.ASYNC_TIMEOUT_KEY.toLowerCase())); if (asyncTimeout<0){ throw new RuntimeException("asyncTimeout size need > 0."); } diff --git a/core/src/main/java/com/dtstack/flink/sql/table/AbsSourceParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbstractSourceParser.java similarity index 81% rename from core/src/main/java/com/dtstack/flink/sql/table/AbsSourceParser.java rename to core/src/main/java/com/dtstack/flink/sql/table/AbstractSourceParser.java index 745357162..308f5859f 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/AbsSourceParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/AbstractSourceParser.java @@ -33,7 +33,7 @@ * @author xuchao */ -public abstract class AbsSourceParser extends AbsTableParser { +public abstract class AbstractSourceParser extends AbstractTableParser { private static final String VIRTUAL_KEY = "virtualFieldKey"; private static final String WATERMARK_KEY = "waterMarkKey"; @@ -43,21 +43,21 @@ public abstract class AbsSourceParser extends AbsTableParser { private static Pattern waterMarkKeyPattern = Pattern.compile("(?i)^\\s*WATERMARK\\s+FOR\\s+(\\S+)\\s+AS\\s+withOffset\\(\\s*(\\S+)\\s*,\\s*(\\d+)\\s*\\)$"); private static Pattern notNullKeyPattern = Pattern.compile("(?i)^(\\w+)\\s+(\\w+)\\s+NOT\\s+NULL?$"); - public AbsSourceParser() { + public AbstractSourceParser() { addParserHandler(VIRTUAL_KEY, virtualFieldKeyPattern, this::dealVirtualField); addParserHandler(WATERMARK_KEY, waterMarkKeyPattern, this::dealWaterMark); addParserHandler(NOTNULL_KEY, notNullKeyPattern, this::dealNotNull); } - protected void dealVirtualField(Matcher matcher, TableInfo tableInfo){ - SourceTableInfo sourceTableInfo = (SourceTableInfo) tableInfo; + protected void dealVirtualField(Matcher matcher, AbstractTableInfo tableInfo){ + AbstractSourceTableInfo sourceTableInfo = (AbstractSourceTableInfo) tableInfo; String fieldName = matcher.group(2); String expression = matcher.group(1); sourceTableInfo.addVirtualField(fieldName, expression); } - protected void dealWaterMark(Matcher matcher, TableInfo tableInfo){ - SourceTableInfo sourceTableInfo = (SourceTableInfo) tableInfo; + protected void dealWaterMark(Matcher matcher, AbstractTableInfo tableInfo){ + AbstractSourceTableInfo sourceTableInfo = (AbstractSourceTableInfo) tableInfo; String eventTimeField = matcher.group(1); //FIXME Temporarily resolve the second parameter row_time_field Integer offset = MathUtil.getIntegerVal(matcher.group(3)); @@ -65,11 +65,11 @@ protected void dealWaterMark(Matcher matcher, TableInfo tableInfo){ sourceTableInfo.setMaxOutOrderness(offset); } - protected void dealNotNull(Matcher matcher, TableInfo tableInfo) { + protected void dealNotNull(Matcher matcher, AbstractTableInfo tableInfo) { String fieldName = matcher.group(1); String fieldType = matcher.group(2); Class fieldClass= dbTypeConvertToJavaType(fieldType); - TableInfo.FieldExtraInfo fieldExtraInfo = new TableInfo.FieldExtraInfo(); + AbstractTableInfo.FieldExtraInfo fieldExtraInfo = new AbstractTableInfo.FieldExtraInfo(); fieldExtraInfo.setNotNull(true); tableInfo.addPhysicalMappings(fieldName, fieldName); diff --git a/core/src/main/java/com/dtstack/flink/sql/table/SourceTableInfo.java b/core/src/main/java/com/dtstack/flink/sql/table/AbstractSourceTableInfo.java similarity index 97% rename 
from core/src/main/java/com/dtstack/flink/sql/table/SourceTableInfo.java rename to core/src/main/java/com/dtstack/flink/sql/table/AbstractSourceTableInfo.java index 9a41fa0a1..0fdc0e911 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/SourceTableInfo.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/AbstractSourceTableInfo.java @@ -36,7 +36,7 @@ * @author xuchao */ -public abstract class SourceTableInfo extends TableInfo { +public abstract class AbstractSourceTableInfo extends AbstractTableInfo { public static final String SOURCE_SUFFIX = "Source"; diff --git a/core/src/main/java/com/dtstack/flink/sql/table/TableInfo.java b/core/src/main/java/com/dtstack/flink/sql/table/AbstractTableInfo.java similarity index 98% rename from core/src/main/java/com/dtstack/flink/sql/table/TableInfo.java rename to core/src/main/java/com/dtstack/flink/sql/table/AbstractTableInfo.java index 59e4fdd39..e450adb2f 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/TableInfo.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/AbstractTableInfo.java @@ -34,7 +34,7 @@ * @author xuchao */ -public abstract class TableInfo implements Serializable { +public abstract class AbstractTableInfo implements Serializable { public static final String PARALLELISM_KEY = "parallelism"; diff --git a/core/src/main/java/com/dtstack/flink/sql/table/TableInfoParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbstractTableInfoParser.java similarity index 85% rename from core/src/main/java/com/dtstack/flink/sql/table/TableInfoParser.java rename to core/src/main/java/com/dtstack/flink/sql/table/AbstractTableInfoParser.java index ae98d90ae..c6687433c 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/TableInfoParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/AbstractTableInfoParser.java @@ -22,7 +22,7 @@ import com.dtstack.flink.sql.enums.ETableType; import com.dtstack.flink.sql.parser.CreateTableParser; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.StreamSideFactory; import com.dtstack.flink.sql.sink.StreamSinkFactory; import com.dtstack.flink.sql.source.StreamSourceFactory; @@ -41,7 +41,7 @@ * @author xuchao */ -public class TableInfoParser { +public class AbstractTableInfoParser { private final static String TYPE_KEY = "type"; @@ -49,16 +49,16 @@ public class TableInfoParser { private final static Pattern SIDE_PATTERN = Pattern.compile(SIDE_TABLE_SIGN); - private Map sourceTableInfoMap = Maps.newConcurrentMap(); + private Map sourceTableInfoMap = Maps.newConcurrentMap(); - private Map targetTableInfoMap = Maps.newConcurrentMap(); + private Map targetTableInfoMap = Maps.newConcurrentMap(); - private Map sideTableInfoMap = Maps.newConcurrentMap(); + private Map sideTableInfoMap = Maps.newConcurrentMap(); //Parsing loaded plugin - public TableInfo parseWithTableType(int tableType, CreateTableParser.SqlParserResult parserResult, - String localPluginRoot) throws Exception { - AbsTableParser absTableParser = null; + public AbstractTableInfo parseWithTableType(int tableType, CreateTableParser.SqlParserResult parserResult, + String localPluginRoot) throws Exception { + AbstractTableParser absTableParser = null; Map props = parserResult.getPropMap(); String type = MathUtil.getString(props.get(TYPE_KEY)); @@ -78,7 +78,7 @@ public TableInfo parseWithTableType(int tableType, CreateTableParser.SqlParserRe }else{ absTableParser = sideTableInfoMap.get(type); if(absTableParser == null){ - 
String cacheType = MathUtil.getString(props.get(SideTableInfo.CACHE_KEY)); + String cacheType = MathUtil.getString(props.get(AbstractSideTableInfo.CACHE_KEY)); absTableParser = StreamSideFactory.getSqlParser(type, localPluginRoot, cacheType); sideTableInfoMap.put(type, absTableParser); } diff --git a/core/src/main/java/com/dtstack/flink/sql/table/AbsTableParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbstractTableParser.java similarity index 90% rename from core/src/main/java/com/dtstack/flink/sql/table/AbsTableParser.java rename to core/src/main/java/com/dtstack/flink/sql/table/AbstractTableParser.java index 31e70caa2..52d9d9af5 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/AbsTableParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/AbstractTableParser.java @@ -39,7 +39,7 @@ * @author xuchao */ -public abstract class AbsTableParser { +public abstract class AbstractTableParser { private static final String PRIMARY_KEY = "primaryKey"; private static final String NEST_JSON_FIELD_KEY = "nestFieldKey"; @@ -52,7 +52,7 @@ public abstract class AbsTableParser { private Map handlerMap = Maps.newHashMap(); - public AbsTableParser() { + public AbstractTableParser() { addParserHandler(PRIMARY_KEY, primaryKeyPattern, this::dealPrimaryKey); addParserHandler(NEST_JSON_FIELD_KEY, nestJsonFieldKeyPattern, this::dealNestField); } @@ -61,9 +61,9 @@ protected boolean fieldNameNeedsUpperCase() { return true; } - public abstract TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) throws Exception; + public abstract AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) throws Exception; - public boolean dealKeyPattern(String fieldRow, TableInfo tableInfo){ + public boolean dealKeyPattern(String fieldRow, AbstractTableInfo tableInfo){ for(Map.Entry keyPattern : patternMap.entrySet()){ Pattern pattern = keyPattern.getValue(); String key = keyPattern.getKey(); @@ -82,7 +82,7 @@ public boolean dealKeyPattern(String fieldRow, TableInfo tableInfo){ return false; } - public void parseFieldsInfo(String fieldsInfo, TableInfo tableInfo){ + public void parseFieldsInfo(String fieldsInfo, AbstractTableInfo tableInfo){ List fieldRows = DtStringUtil.splitIgnoreQuota(fieldsInfo, ','); for(String fieldRow : fieldRows){ @@ -119,7 +119,7 @@ public void parseFieldsInfo(String fieldsInfo, TableInfo tableInfo){ tableInfo.finish(); } - public void dealPrimaryKey(Matcher matcher, TableInfo tableInfo){ + public void dealPrimaryKey(Matcher matcher, AbstractTableInfo tableInfo){ String primaryFields = matcher.group(1).trim(); String[] splitArry = primaryFields.split(","); List primaryKes = Lists.newArrayList(splitArry); @@ -131,7 +131,7 @@ public void dealPrimaryKey(Matcher matcher, TableInfo tableInfo){ * @param matcher * @param tableInfo */ - protected void dealNestField(Matcher matcher, TableInfo tableInfo) { + protected void dealNestField(Matcher matcher, AbstractTableInfo tableInfo) { String physicalField = matcher.group(1); Preconditions.checkArgument(!physicalFieldFunPattern.matcher(physicalField).find(), "No need to add data types when using functions, The correct way is : strLen(name) as nameSize, "); @@ -140,7 +140,7 @@ protected void dealNestField(Matcher matcher, TableInfo tableInfo) { String mappingField = matcher.group(4); Class fieldClass = dbTypeConvertToJavaType(fieldType); boolean notNull = matcher.group(5) != null; - TableInfo.FieldExtraInfo fieldExtraInfo = new TableInfo.FieldExtraInfo(); + AbstractTableInfo.FieldExtraInfo 
fieldExtraInfo = new AbstractTableInfo.FieldExtraInfo(); fieldExtraInfo.setNotNull(notNull); tableInfo.addPhysicalMappings(mappingField, physicalField); diff --git a/core/src/main/java/com/dtstack/flink/sql/table/TargetTableInfo.java b/core/src/main/java/com/dtstack/flink/sql/table/AbstractTargetTableInfo.java similarity index 94% rename from core/src/main/java/com/dtstack/flink/sql/table/TargetTableInfo.java rename to core/src/main/java/com/dtstack/flink/sql/table/AbstractTargetTableInfo.java index 0dc9ca77b..dbd99f993 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/TargetTableInfo.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/AbstractTargetTableInfo.java @@ -29,7 +29,7 @@ * @author xuchao */ -public abstract class TargetTableInfo extends TableInfo { +public abstract class AbstractTargetTableInfo extends AbstractTableInfo { public static final String TARGET_SUFFIX = "Sink"; diff --git a/core/src/main/java/com/dtstack/flink/sql/table/ITableFieldDealHandler.java b/core/src/main/java/com/dtstack/flink/sql/table/ITableFieldDealHandler.java index db804ea34..fab634cdc 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/ITableFieldDealHandler.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/ITableFieldDealHandler.java @@ -30,5 +30,5 @@ */ public interface ITableFieldDealHandler { - void dealPrimaryKey(Matcher matcher, TableInfo tableInfo); + void dealPrimaryKey(Matcher matcher, AbstractTableInfo tableInfo); } diff --git a/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java index da5a42b57..c942fb064 100644 --- a/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java +++ b/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java @@ -39,7 +39,6 @@ */ public class DateUtil { - static final String timeZone = "GMT+8"; static final String datetimeFormat = "yyyy-MM-dd HH:mm:ss"; static final String dateFormat = "yyyy-MM-dd"; static final String timeFormat = "HH:mm:ss"; diff --git a/core/src/main/java/com/dtstack/flink/sql/util/PluginUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/PluginUtil.java index a63e5a663..8eb743cf0 100644 --- a/core/src/main/java/com/dtstack/flink/sql/util/PluginUtil.java +++ b/core/src/main/java/com/dtstack/flink/sql/util/PluginUtil.java @@ -109,7 +109,7 @@ public static String getSqlSideClassName(String pluginTypeName, String type, Str return CLASS_PRE_STR + "." + type.toLowerCase() + "." + pluginTypeName + "." 
+ pluginClassName; } - public static Map ObjectToMap(Object obj) throws Exception{ + public static Map objectToMap(Object obj) throws Exception{ return objectMapper.readValue(objectMapper.writeValueAsBytes(obj), Map.class); } @@ -173,8 +173,8 @@ public static void addPluginJar(String pluginDir, DtClassLoader classLoader) thr } for(File file : files){ - URL pluginJarURL = file.toURI().toURL(); - classLoader.addURL(pluginJarURL); + URL pluginJarUrl = file.toURI().toURL(); + classLoader.addURL(pluginJarUrl); } } @@ -191,8 +191,8 @@ public static URL[] getPluginJarUrls(String pluginDir) throws MalformedURLExcept } for(File file : files){ - URL pluginJarURL = file.toURI().toURL(); - urlList.add(pluginJarURL); + URL pluginJarUrl = file.toURI().toURL(); + urlList.add(pluginJarUrl); } return urlList.toArray(new URL[urlList.size()]); } diff --git a/core/src/main/java/com/dtstack/flink/sql/watermarker/AbsCustomerWaterMarker.java b/core/src/main/java/com/dtstack/flink/sql/watermarker/AbstractCustomerWaterMarker.java similarity index 94% rename from core/src/main/java/com/dtstack/flink/sql/watermarker/AbsCustomerWaterMarker.java rename to core/src/main/java/com/dtstack/flink/sql/watermarker/AbstractCustomerWaterMarker.java index ed8f64288..d75d26a61 100644 --- a/core/src/main/java/com/dtstack/flink/sql/watermarker/AbsCustomerWaterMarker.java +++ b/core/src/main/java/com/dtstack/flink/sql/watermarker/AbstractCustomerWaterMarker.java @@ -38,7 +38,7 @@ * @author xuchao */ -public abstract class AbsCustomerWaterMarker extends BoundedOutOfOrdernessTimestampExtractor implements RichFunction { +public abstract class AbstractCustomerWaterMarker extends BoundedOutOfOrdernessTimestampExtractor implements RichFunction { private static final long serialVersionUID = 1L; @@ -55,7 +55,7 @@ public abstract class AbsCustomerWaterMarker extends BoundedOutOfOrdernessTim protected TimeZone timezone; - public AbsCustomerWaterMarker(Time maxOutOfOrderness) { + public AbstractCustomerWaterMarker(Time maxOutOfOrderness) { super(maxOutOfOrderness); } diff --git a/core/src/main/java/com/dtstack/flink/sql/watermarker/CustomerWaterMarkerForLong.java b/core/src/main/java/com/dtstack/flink/sql/watermarker/CustomerWaterMarkerForLong.java index 99415ba61..979b4c3d6 100644 --- a/core/src/main/java/com/dtstack/flink/sql/watermarker/CustomerWaterMarkerForLong.java +++ b/core/src/main/java/com/dtstack/flink/sql/watermarker/CustomerWaterMarkerForLong.java @@ -35,7 +35,7 @@ * @author xuchao */ -public class CustomerWaterMarkerForLong extends AbsCustomerWaterMarker { +public class CustomerWaterMarkerForLong extends AbstractCustomerWaterMarker { private static final Logger logger = LoggerFactory.getLogger(CustomerWaterMarkerForLong.class); diff --git a/core/src/main/java/com/dtstack/flink/sql/watermarker/CustomerWaterMarkerForTimeStamp.java b/core/src/main/java/com/dtstack/flink/sql/watermarker/CustomerWaterMarkerForTimeStamp.java index b7961e410..05ee3e46e 100644 --- a/core/src/main/java/com/dtstack/flink/sql/watermarker/CustomerWaterMarkerForTimeStamp.java +++ b/core/src/main/java/com/dtstack/flink/sql/watermarker/CustomerWaterMarkerForTimeStamp.java @@ -20,7 +20,6 @@ package com.dtstack.flink.sql.watermarker; -import com.dtstack.flink.sql.util.MathUtil; import org.apache.flink.streaming.api.windowing.time.Time; import org.apache.flink.types.Row; import org.slf4j.Logger; @@ -36,7 +35,7 @@ * @author xuchao */ -public class CustomerWaterMarkerForTimeStamp extends AbsCustomerWaterMarker { +public class CustomerWaterMarkerForTimeStamp extends 
AbstractCustomerWaterMarker { private static final Logger logger = LoggerFactory.getLogger(CustomerWaterMarkerForTimeStamp.class); diff --git a/core/src/main/java/com/dtstack/flink/sql/watermarker/WaterMarkerAssigner.java b/core/src/main/java/com/dtstack/flink/sql/watermarker/WaterMarkerAssigner.java index f3208db03..b0ca0335f 100644 --- a/core/src/main/java/com/dtstack/flink/sql/watermarker/WaterMarkerAssigner.java +++ b/core/src/main/java/com/dtstack/flink/sql/watermarker/WaterMarkerAssigner.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.watermarker; -import com.dtstack.flink.sql.table.SourceTableInfo; +import com.dtstack.flink.sql.table.AbstractSourceTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.RowTypeInfo; import com.google.common.base.Strings; @@ -40,7 +40,7 @@ public class WaterMarkerAssigner { - public boolean checkNeedAssignWaterMarker(SourceTableInfo tableInfo){ + public boolean checkNeedAssignWaterMarker(AbstractSourceTableInfo tableInfo){ if(Strings.isNullOrEmpty(tableInfo.getEventTimeField())){ return false; } @@ -48,7 +48,7 @@ public boolean checkNeedAssignWaterMarker(SourceTableInfo tableInfo){ return true; } - public DataStream assignWaterMarker(DataStream dataStream, RowTypeInfo typeInfo, SourceTableInfo sourceTableInfo){ + public DataStream assignWaterMarker(DataStream dataStream, RowTypeInfo typeInfo, AbstractSourceTableInfo sourceTableInfo){ String eventTimeFieldName = sourceTableInfo.getEventTimeField(); @@ -75,7 +75,7 @@ public DataStream assignWaterMarker(DataStream dataStream, RowTypeInfo type TypeInformation fieldType = fieldTypes[pos]; - AbsCustomerWaterMarker waterMarker = null; + AbstractCustomerWaterMarker waterMarker = null; if(fieldType.getTypeClass().isAssignableFrom(Timestamp.class)){ waterMarker = new CustomerWaterMarkerForTimeStamp(Time.milliseconds(maxOutOrderness), pos,timeZone); }else if(fieldType.getTypeClass().isAssignableFrom(Long.class)){ diff --git a/core/src/test/java/com/dtstack/flink/sql/side/SidePredicatesParserTest.java b/core/src/test/java/com/dtstack/flink/sql/side/SidePredicatesParserTest.java index 546fcbbac..feec8e547 100644 --- a/core/src/test/java/com/dtstack/flink/sql/side/SidePredicatesParserTest.java +++ b/core/src/test/java/com/dtstack/flink/sql/side/SidePredicatesParserTest.java @@ -54,7 +54,7 @@ public void testfillPredicatesForSideTable() throws SqlParseException { " MyTable.a='1' and s.d='1' and s.d <> '3' and s.c LIKE '%xx%' and s.c in ('1','2') and s.c between '10' and '23' and s.d is not null\n"; - SideTableInfo sideTableInfo = new SideTableInfo(){ + AbstractSideTableInfo sideTableInfo = new AbstractSideTableInfo(){ @Override public boolean check() { return false; @@ -63,7 +63,7 @@ public boolean check() { sideTableInfo.setName("sideTable"); - Map sideTableMap = new HashMap<>(); + Map sideTableMap = new HashMap<>(); sideTableMap.put("sideTable", sideTableInfo); SidePredicatesParser sidePredicatesParser = new SidePredicatesParser(); diff --git a/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllReqRow.java b/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllReqRow.java index e64e55987..b354a6269 100644 --- a/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllReqRow.java +++ b/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllReqRow.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; 
-import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.slf4j.Logger; @@ -43,15 +43,15 @@ public class Db2AllReqRow extends AbstractRdbAllReqRow { private static final String DB2_DRIVER = "com.ibm.db2.jcc.DB2Driver"; - public Db2AllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public Db2AllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new Db2AllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } @Override - public Connection getConn(String dbURL, String userName, String password) { + public Connection getConn(String dbUrl, String userName, String password) { try { Class.forName(DB2_DRIVER); - Connection conn = DriverManager.getConnection(dbURL, userName, password); + Connection conn = DriverManager.getConnection(dbUrl, userName, password); return conn; } catch (Exception e) { LOG.error("", e); diff --git a/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllSideInfo.java b/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllSideInfo.java index 282ee440c..0120c58a0 100644 --- a/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllSideInfo.java +++ b/db2/db2-side/db2-all-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AllSideInfo.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.RdbAllSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -35,7 +35,7 @@ */ public class Db2AllSideInfo extends RdbAllSideInfo { - public Db2AllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public Db2AllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java b/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java index 59571aa56..240bab9d1 100644 --- a/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java +++ b/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncReqRow; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import io.vertx.core.Vertx; @@ -48,7 +48,7 @@ public class Db2AsyncReqRow extends RdbAsyncReqRow { private final static String DB2_DRIVER = "com.ibm.db2.jcc.DB2Driver"; - public Db2AsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public Db2AsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new Db2AsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, 
sideTableInfo)); } diff --git a/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncSideInfo.java b/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncSideInfo.java index e53dd7f27..9cbdc997e 100644 --- a/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncSideInfo.java +++ b/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncSideInfo.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -35,7 +35,7 @@ */ public class Db2AsyncSideInfo extends RdbAsyncSideInfo { - public Db2AsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public Db2AsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/db2/db2-side/db2-side-core/src/main/java/com/dtstack/flink/sql/side/db2/table/Db2SideParser.java b/db2/db2-side/db2-side-core/src/main/java/com/dtstack/flink/sql/side/db2/table/Db2SideParser.java index db8a07dc6..96be4ec15 100644 --- a/db2/db2-side/db2-side-core/src/main/java/com/dtstack/flink/sql/side/db2/table/Db2SideParser.java +++ b/db2/db2-side/db2-side-core/src/main/java/com/dtstack/flink/sql/side/db2/table/Db2SideParser.java @@ -19,7 +19,7 @@ package com.dtstack.flink.sql.side.db2.table; import com.dtstack.flink.sql.side.rdb.table.RdbSideParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -36,8 +36,8 @@ public class Db2SideParser extends RdbSideParser { private static final String CURR_TYPE = "db2"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo tableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo tableInfo = super.getTableInfo(tableName, fieldsInfo, props); tableInfo.setType(CURR_TYPE); return tableInfo; diff --git a/db2/db2-sink/src/main/java/com/dtstack/flink/sql/sink/db/table/DbSinkParser.java b/db2/db2-sink/src/main/java/com/dtstack/flink/sql/sink/db/table/DbSinkParser.java index ba11aabf3..3e823ab20 100644 --- a/db2/db2-sink/src/main/java/com/dtstack/flink/sql/sink/db/table/DbSinkParser.java +++ b/db2/db2-sink/src/main/java/com/dtstack/flink/sql/sink/db/table/DbSinkParser.java @@ -1,7 +1,7 @@ package com.dtstack.flink.sql.sink.db.table; import com.dtstack.flink.sql.sink.rdb.table.RdbSinkParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -10,8 +10,8 @@ public class DbSinkParser extends RdbSinkParser { private static final String CURR_TYPE = "db2"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo tableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo tableInfo = super.getTableInfo(tableName, fieldsInfo, props); tableInfo.setType(CURR_TYPE); return tableInfo; } diff --git 
a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java index 7ae2fb1ae..8605613d8 100644 --- a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java +++ b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java @@ -22,7 +22,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.elasticsearch.table.ElasticsearchTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; @@ -162,7 +162,7 @@ public void setBulkFlushMaxActions(int bulkFlushMaxActions) { } @Override - public ElasticsearchSink genStreamSink(TargetTableInfo targetTableInfo) { + public ElasticsearchSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { ElasticsearchTableInfo elasticsearchTableInfo = (ElasticsearchTableInfo) targetTableInfo; esTableInfo = elasticsearchTableInfo; clusterName = elasticsearchTableInfo.getClusterName(); diff --git a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchSinkParser.java b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchSinkParser.java index 93682de46..7988e597c 100644 --- a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchSinkParser.java +++ b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchSinkParser.java @@ -21,8 +21,8 @@ package com.dtstack.flink.sql.sink.elasticsearch.table; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; @@ -32,7 +32,7 @@ * @author sishu.yss * @Company: www.dtstack.com */ -public class ElasticsearchSinkParser extends AbsTableParser { +public class ElasticsearchSinkParser extends AbstractTableParser { private static final String KEY_ES_ADDRESS = "address"; @@ -56,7 +56,7 @@ protected boolean fieldNameNeedsUpperCase() { } @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { ElasticsearchTableInfo elasticsearchTableInfo = new ElasticsearchTableInfo(); elasticsearchTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, elasticsearchTableInfo); diff --git a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java index ac1d712ce..b6b6e0102 100644 --- a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java +++ b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java @@ -21,7 +21,7 @@ package 
com.dtstack.flink.sql.sink.elasticsearch.table; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import com.google.common.base.Preconditions; /** @@ -29,7 +29,7 @@ * @author sishu.yss * @Company: www.dtstack.com */ -public class ElasticsearchTableInfo extends TargetTableInfo { +public class ElasticsearchTableInfo extends AbstractTargetTableInfo { private static final String CURR_TYPE = "elasticsearch"; diff --git a/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java b/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java index 11380eea6..6f7720472 100644 --- a/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java +++ b/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java @@ -45,7 +45,7 @@ import java.util.*; import java.util.concurrent.atomic.AtomicReference; -public class HbaseAllReqRow extends AllReqRow { +public class HbaseAllReqRow extends BaseAllReqRow { private static final Logger LOG = LoggerFactory.getLogger(HbaseAllReqRow.class); @@ -55,7 +55,7 @@ public class HbaseAllReqRow extends AllReqRow { private AtomicReference>> cacheRef = new AtomicReference<>(); - public HbaseAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public HbaseAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new HbaseAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); tableName = ((HbaseSideTableInfo)sideTableInfo).getTableName(); @@ -134,7 +134,7 @@ public void flatMap(CRow input, Collector out) throws Exception { Map cacheList = null; - SideTableInfo sideTableInfo = sideInfo.getSideTableInfo(); + AbstractSideTableInfo sideTableInfo = sideInfo.getSideTableInfo(); HbaseSideTableInfo hbaseSideTableInfo = (HbaseSideTableInfo) sideTableInfo; if (hbaseSideTableInfo.isPreRowKey()) { for (Map.Entry> entry : cacheRef.get().entrySet()) { @@ -153,7 +153,7 @@ public void flatMap(CRow input, Collector out) throws Exception { } private void loadData(Map> tmpCache) throws SQLException { - SideTableInfo sideTableInfo = sideInfo.getSideTableInfo(); + AbstractSideTableInfo sideTableInfo = sideInfo.getSideTableInfo(); HbaseSideTableInfo hbaseSideTableInfo = (HbaseSideTableInfo) sideTableInfo; Configuration conf = new Configuration(); conf.set("hbase.zookeeper.quorum", hbaseSideTableInfo.getHost()); diff --git a/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllSideInfo.java b/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllSideInfo.java index ea51f46e4..663c2927b 100644 --- a/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllSideInfo.java +++ b/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllSideInfo.java @@ -22,8 +22,8 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.util.ParseUtils; import org.apache.calcite.sql.SqlNode; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -31,16 +31,16 @@ import 
java.util.List; -public class HbaseAllSideInfo extends SideInfo { +public class HbaseAllSideInfo extends BaseSideInfo { private RowKeyBuilder rowKeyBuilder; - public HbaseAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public HbaseAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { rowKeyBuilder = new RowKeyBuilder(); if(sideTableInfo.getPrimaryKeys().size() < 1){ throw new RuntimeException("Primary key dimension table must be filled"); diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncReqRow.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncReqRow.java index 251b88034..24ee01786 100644 --- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncReqRow.java +++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAsyncReqRow.java @@ -21,12 +21,12 @@ package com.dtstack.flink.sql.side.hbase; import com.dtstack.flink.sql.enums.ECacheContentType; -import com.dtstack.flink.sql.side.AsyncReqRow; +import com.dtstack.flink.sql.side.BaseAsyncReqRow; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.cache.CacheObj; -import com.dtstack.flink.sql.side.hbase.rowkeydealer.AbsRowKeyModeDealer; +import com.dtstack.flink.sql.side.hbase.rowkeydealer.AbstractRowKeyModeDealer; import com.dtstack.flink.sql.side.hbase.rowkeydealer.PreRowKeyModeDealerDealer; import com.dtstack.flink.sql.side.hbase.rowkeydealer.RowKeyEqualModeDealer; import com.dtstack.flink.sql.side.hbase.table.HbaseSideTableInfo; @@ -58,7 +58,7 @@ * @author xuchao */ -public class HbaseAsyncReqRow extends AsyncReqRow { +public class HbaseAsyncReqRow extends BaseAsyncReqRow { private static final long serialVersionUID = 2098635104857937717L; @@ -73,13 +73,13 @@ public class HbaseAsyncReqRow extends AsyncReqRow { private transient HBaseClient hBaseClient; - private transient AbsRowKeyModeDealer rowKeyMode; + private transient AbstractRowKeyModeDealer rowKeyMode; private String tableName; private String[] colNames; - public HbaseAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public HbaseAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new HbaseAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); tableName = ((HbaseSideTableInfo)sideTableInfo).getTableName(); @@ -89,7 +89,7 @@ public HbaseAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List colRefType; - public HbaseAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public HbaseAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void 
buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { rowKeyBuilder = new RowKeyBuilder(); if(sideTableInfo.getPrimaryKeys().size() < 1){ throw new RuntimeException("Primary key dimension table must be filled"); diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/AbsRowKeyModeDealer.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/AbstractRowKeyModeDealer.java similarity index 87% rename from hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/AbsRowKeyModeDealer.java rename to hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/AbstractRowKeyModeDealer.java index 1506bc440..90ee289bd 100644 --- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/AbsRowKeyModeDealer.java +++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/AbstractRowKeyModeDealer.java @@ -21,7 +21,7 @@ package com.dtstack.flink.sql.side.hbase.rowkeydealer; import com.dtstack.flink.sql.side.FieldInfo; -import com.dtstack.flink.sql.side.cache.AbsSideCache; +import com.dtstack.flink.sql.side.cache.AbstractSideCache; import org.apache.calcite.sql.JoinType; import com.google.common.collect.Maps; import org.apache.flink.streaming.api.functions.async.ResultFuture; @@ -41,7 +41,7 @@ * @author xuchao */ -public abstract class AbsRowKeyModeDealer { +public abstract class AbstractRowKeyModeDealer { protected Map colRefType; @@ -60,9 +60,9 @@ public abstract class AbsRowKeyModeDealer { protected Map sideFieldIndex = Maps.newHashMap(); - public AbsRowKeyModeDealer(Map colRefType, String[] colNames, HBaseClient hBaseClient, - boolean openCache, JoinType joinType, List outFieldInfoList, - Map inFieldIndex, Map sideFieldIndex){ + public AbstractRowKeyModeDealer(Map colRefType, String[] colNames, HBaseClient hBaseClient, + boolean openCache, JoinType joinType, List outFieldInfoList, + Map inFieldIndex, Map sideFieldIndex){ this.colRefType = colRefType; this.colNames = colNames; this.hBaseClient = hBaseClient; @@ -111,5 +111,5 @@ protected Row fillData(Row input, Object sideInput){ } public abstract void asyncGetData(String tableName, String rowKeyStr, CRow input, ResultFuture resultFuture, - AbsSideCache sideCache); + AbstractSideCache sideCache); } diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java index a41ffe916..3fad216b2 100644 --- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java +++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java @@ -23,7 +23,7 @@ import com.dtstack.flink.sql.enums.ECacheContentType; import com.dtstack.flink.sql.side.CacheMissVal; import com.dtstack.flink.sql.side.FieldInfo; -import com.dtstack.flink.sql.side.cache.AbsSideCache; +import com.dtstack.flink.sql.side.cache.AbstractSideCache; import com.dtstack.flink.sql.side.cache.CacheObj; import com.dtstack.flink.sql.side.hbase.utils.HbaseUtils; import com.google.common.collect.Maps; @@ -44,7 +44,6 @@ import org.slf4j.LoggerFactory; import java.util.ArrayList; -import java.util.Collections; import 
java.util.List; import java.util.Map; @@ -55,7 +54,7 @@ * @author xuchao */ -public class PreRowKeyModeDealerDealer extends AbsRowKeyModeDealer { +public class PreRowKeyModeDealerDealer extends AbstractRowKeyModeDealer { private static final Logger LOG = LoggerFactory.getLogger(PreRowKeyModeDealerDealer.class); @@ -67,7 +66,7 @@ public PreRowKeyModeDealerDealer(Map colRefType, String[] colNam @Override public void asyncGetData(String tableName, String rowKeyStr, CRow input, ResultFuture resultFuture, - AbsSideCache sideCache) { + AbstractSideCache sideCache) { Scanner prefixScanner = hBaseClient.newScanner(tableName); ScanFilter scanFilter = new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryPrefixComparator(Bytes.UTF8(rowKeyStr))); prefixScanner.setFilter(scanFilter); @@ -80,7 +79,7 @@ public void asyncGetData(String tableName, String rowKeyStr, CRow input, ResultF } - private String dealOneRow(ArrayList> args, String rowKeyStr, CRow input, ResultFuture resultFuture, AbsSideCache sideCache) { + private String dealOneRow(ArrayList> args, String rowKeyStr, CRow input, ResultFuture resultFuture, AbstractSideCache sideCache) { if(args == null || args.size() == 0){ dealMissKey(input, resultFuture); if (openCache) { diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java index b20c316db..01f43b246 100644 --- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java +++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java @@ -23,7 +23,7 @@ import com.dtstack.flink.sql.enums.ECacheContentType; import com.dtstack.flink.sql.side.CacheMissVal; import com.dtstack.flink.sql.side.FieldInfo; -import com.dtstack.flink.sql.side.cache.AbsSideCache; +import com.dtstack.flink.sql.side.cache.AbstractSideCache; import com.dtstack.flink.sql.side.cache.CacheObj; import com.dtstack.flink.sql.side.hbase.utils.HbaseUtils; import com.google.common.collect.Maps; @@ -49,7 +49,7 @@ * @author xuchao */ -public class RowKeyEqualModeDealer extends AbsRowKeyModeDealer { +public class RowKeyEqualModeDealer extends AbstractRowKeyModeDealer { private static final Logger LOG = LoggerFactory.getLogger(RowKeyEqualModeDealer.class); @@ -62,7 +62,7 @@ public RowKeyEqualModeDealer(Map colRefType, String[] colNames, @Override public void asyncGetData(String tableName, String rowKeyStr, CRow input, ResultFuture resultFuture, - AbsSideCache sideCache){ + AbstractSideCache sideCache){ //TODO 是否有查询多个col family 和多个col的方法 GetRequest getRequest = new GetRequest(tableName, rowKeyStr); hBaseClient.get(getRequest).addCallbacks(arg -> { diff --git a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/ReplaceOperator.java b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/AbstractReplaceOperator.java similarity index 93% rename from hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/ReplaceOperator.java rename to hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/AbstractReplaceOperator.java index c3c9b5cfe..c75fa43c1 100644 --- a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/ReplaceOperator.java +++ 
b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/AbstractReplaceOperator.java @@ -31,11 +31,11 @@ * @author xuchao */ -public abstract class ReplaceOperator { +public abstract class AbstractReplaceOperator { private EReplaceOpType opType; - public ReplaceOperator(EReplaceOpType opType){ + public AbstractReplaceOperator(EReplaceOpType opType){ this.opType = opType; } diff --git a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/Md5ReplaceOperator.java b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/Md5ReplaceOperator.java index af5e24dd6..ab826c98f 100644 --- a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/Md5ReplaceOperator.java +++ b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/Md5ReplaceOperator.java @@ -30,7 +30,7 @@ * @author xuchao */ -public class Md5ReplaceOperator extends ReplaceOperator{ +public class Md5ReplaceOperator extends AbstractReplaceOperator { public Md5ReplaceOperator(EReplaceOpType opType) { super(opType); diff --git a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/enums/EReplaceOpType.java b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/enums/EReplaceOpType.java index 834edf9ae..ac9a3b87c 100644 --- a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/enums/EReplaceOpType.java +++ b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/enums/EReplaceOpType.java @@ -28,5 +28,12 @@ */ public enum EReplaceOpType { - NO_FUNC, MD5_FUNC; + /** + * 没有func + */ + NO_FUNC, + /** + * md5 func + */ + MD5_FUNC; } diff --git a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/enums/EReplaceType.java b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/enums/EReplaceType.java index 3768d7c5e..1ebb06216 100644 --- a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/enums/EReplaceType.java +++ b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/enums/EReplaceType.java @@ -28,6 +28,12 @@ * @author xuchao */ public enum EReplaceType { + /** + * 参数 + */ PARAM, + /** + * 常量 + */ CONSTANT; } diff --git a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/table/HbaseSideParser.java b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/table/HbaseSideParser.java index 7b627da4d..cf44c181f 100644 --- a/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/table/HbaseSideParser.java +++ b/hbase/hbase-side/hbase-side-core/src/main/java/com/dtstack/flink/sql/side/hbase/table/HbaseSideParser.java @@ -20,15 +20,15 @@ package com.dtstack.flink.sql.side.hbase.table; -import com.dtstack.flink.sql.table.AbsSideTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractSideTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; -import static com.dtstack.flink.sql.table.TableInfo.PARALLELISM_KEY; +import static com.dtstack.flink.sql.table.AbstractTableInfo.PARALLELISM_KEY; /** * hbase field information must include the definition of an alias -> sql which does not allow ":" @@ -37,7 +37,7 @@ * @author xuchao */ -public class 
HbaseSideParser extends AbsSideTableParser { +public class HbaseSideParser extends AbstractSideTableParser { private final static String FIELD_KEY = "fieldKey"; @@ -58,7 +58,7 @@ public HbaseSideParser() { } @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { HbaseSideTableInfo hbaseTableInfo = new HbaseSideTableInfo(); hbaseTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, hbaseTableInfo); @@ -77,7 +77,7 @@ public TableInfo getTableInfo(String tableName, String fieldsInfo, Map { +public class HbaseOutputFormat extends AbstractDtRichOutputFormat { private static final Logger LOG = LoggerFactory.getLogger(HbaseOutputFormat.class); diff --git a/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/HbaseSink.java b/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/HbaseSink.java index 447b02921..9308725a2 100644 --- a/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/HbaseSink.java +++ b/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/HbaseSink.java @@ -22,7 +22,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.hbase.table.HbaseTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -30,7 +30,6 @@ import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.functions.sink.OutputFormatSinkFunction; import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; -import org.apache.flink.table.sinks.AppendStreamTableSink; import org.apache.flink.table.sinks.RetractStreamTableSink; import org.apache.flink.table.sinks.TableSink; import org.apache.flink.types.Row; @@ -59,7 +58,7 @@ public HbaseSink() { } @Override - public HbaseSink genStreamSink(TargetTableInfo targetTableInfo) { + public HbaseSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { HbaseTableInfo hbaseTableInfo = (HbaseTableInfo) targetTableInfo; this.zookeeperQuorum = hbaseTableInfo.getHost(); this.port = hbaseTableInfo.getPort(); diff --git a/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseSinkParser.java b/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseSinkParser.java index f1ae72b6b..d09f54128 100644 --- a/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseSinkParser.java +++ b/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseSinkParser.java @@ -22,8 +22,8 @@ import com.dtstack.flink.sql.enums.EUpdateMode; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; import com.dtstack.flink.sql.util.MathUtil; @@ -31,14 +31,14 @@ import java.util.List; import java.util.Map; -import static com.dtstack.flink.sql.table.TableInfo.PARALLELISM_KEY; +import static com.dtstack.flink.sql.table.AbstractTableInfo.PARALLELISM_KEY; /** * Date: 2018/09/14 * Company: www.dtstack.com * @author sishu.yss */ -public class HbaseSinkParser extends AbsTableParser { +public class HbaseSinkParser extends AbstractTableParser { public static 
final String HBASE_ZOOKEEPER_QUORUM = "zookeeperQuorum"; @@ -59,7 +59,7 @@ protected boolean fieldNameNeedsUpperCase() { } @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { HbaseTableInfo hbaseTableInfo = new HbaseTableInfo(); hbaseTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, hbaseTableInfo); diff --git a/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseTableInfo.java b/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseTableInfo.java index 610eb5c30..62f7c9936 100644 --- a/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseTableInfo.java +++ b/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/table/HbaseTableInfo.java @@ -21,7 +21,7 @@ package com.dtstack.flink.sql.sink.hbase.table; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import com.google.common.base.Preconditions; import java.util.Map; @@ -30,7 +30,7 @@ * Company: www.dtstack.com * @author sishu.yss */ -public class HbaseTableInfo extends TargetTableInfo { +public class HbaseTableInfo extends AbstractTargetTableInfo { private static final String CURR_TYPE = "hbase"; diff --git a/hbase/pom.xml b/hbase/pom.xml index 835457e29..f64ffab1b 100644 --- a/hbase/pom.xml +++ b/hbase/pom.xml @@ -29,6 +29,13 @@ 1.0-SNAPSHOT provided + + jdk.tools + jdk.tools + 1.6 + system + ${JAVA_HOME}/lib/tools.jar + org.apache.hbase diff --git a/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllReqRow.java b/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllReqRow.java index 4118421ee..10938308a 100644 --- a/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllReqRow.java +++ b/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllReqRow.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.impala.table.ImpalaSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.JDBCUtils; @@ -53,13 +53,13 @@ public class ImpalaAllReqRow extends AbstractRdbAllReqRow { private ImpalaSideTableInfo impalaSideTableInfo; - public ImpalaAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public ImpalaAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new ImpalaAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); this.impalaSideTableInfo = (ImpalaSideTableInfo) sideTableInfo; } @Override - public Connection getConn(String dbURL, String userName, String password) { + public Connection getConn(String dbUrl, String userName, String password) { try { Connection connection ; String url = getUrl(); diff --git a/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllSideInfo.java b/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllSideInfo.java index d40c5f48c..a5e643967 100644 --- 
a/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllSideInfo.java +++ b/impala/impala-side/impala-all-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAllSideInfo.java @@ -20,10 +20,9 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.impala.table.ImpalaSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.RdbAllSideInfo; -import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; import java.util.List; @@ -32,7 +31,7 @@ public class ImpalaAllSideInfo extends RdbAllSideInfo { - public ImpalaAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public ImpalaAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @@ -61,7 +60,7 @@ private String buildPartitionCondition(ImpalaSideTableInfo impalaSideTableInfo) private String getPartitionVaule(String fieldType, List values) { String partitionVaule = values.stream().map(val -> { - return (fieldType.toLowerCase().equals("string") || fieldType.toLowerCase().equals("varchar")) ? "'" + val + "'" : val.toString(); + return ("string".equals(fieldType.toLowerCase()) || "varchar".equals(fieldType.toLowerCase())) ? "'" + val + "'" : val.toString(); }).collect(Collectors.joining(" , ")).toString(); return partitionVaule; diff --git a/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java b/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java index 1b983ca2c..77d3f3007 100644 --- a/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java +++ b/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.impala.table.ImpalaSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncReqRow; import io.vertx.core.Vertx; @@ -50,7 +50,7 @@ public class ImpalaAsyncReqRow extends RdbAsyncReqRow { private final static String IMPALA_DRIVER = "com.cloudera.impala.jdbc41.Driver"; - public ImpalaAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public ImpalaAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new ImpalaAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncSideInfo.java b/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncSideInfo.java index 53559b9a7..14fbf0ed3 100644 --- a/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncSideInfo.java +++ b/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncSideInfo.java @@ -20,7 +20,7 @@ import 
com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.impala.table.ImpalaSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -38,7 +38,7 @@ public class ImpalaAsyncSideInfo extends RdbAsyncSideInfo { - public ImpalaAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public ImpalaAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @@ -69,7 +69,7 @@ private String buildPartitionCondition(ImpalaSideTableInfo impalaSideTableInfo) private String getPartitionVaule(String fieldType, List values) { String partitionVaule = values.stream().map(val -> { - return (fieldType.toLowerCase().equals("string") || fieldType.toLowerCase().equals("varchar")) ? "'" + val + "'" : val.toString(); + return ("string".equals(fieldType.toLowerCase()) || "varchar".equals(fieldType.toLowerCase())) ? "'" + val + "'" : val.toString(); }).collect(Collectors.joining(" , ")).toString(); return partitionVaule; diff --git a/impala/impala-side/impala-side-core/src/main/java/com/dtstack/flink/sql/side/impala/table/ImpalaSideParser.java b/impala/impala-side/impala-side-core/src/main/java/com/dtstack/flink/sql/side/impala/table/ImpalaSideParser.java index 344037124..10a13ec72 100644 --- a/impala/impala-side/impala-side-core/src/main/java/com/dtstack/flink/sql/side/impala/table/ImpalaSideParser.java +++ b/impala/impala-side/impala-side-core/src/main/java/com/dtstack/flink/sql/side/impala/table/ImpalaSideParser.java @@ -19,14 +19,13 @@ package com.dtstack.flink.sql.side.impala.table; import com.dtstack.flink.sql.side.rdb.table.RdbSideParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.math.BigDecimal; -import java.sql.Date; import java.sql.Timestamp; import java.util.Arrays; import java.util.HashMap; @@ -48,7 +47,7 @@ public class ImpalaSideParser extends RdbSideParser { private static final String CURR_TYPE = "impala"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { ImpalaSideTableInfo impalaSideTableInfo = new ImpalaSideTableInfo(); impalaSideTableInfo.setType(CURR_TYPE); impalaSideTableInfo.setName(tableName); @@ -147,6 +146,8 @@ public Class dbTypeConvertToJavaType(String fieldType) { return String.class; case "timestamp": return Timestamp.class; + default: + break; } throw new RuntimeException("不支持 " + fieldType + " 类型"); diff --git a/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/ImpalaSink.java b/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/ImpalaSink.java index 91b6532bf..cd1e1e945 100644 --- a/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/ImpalaSink.java +++ b/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/ImpalaSink.java @@ -23,7 +23,7 @@ import com.dtstack.flink.sql.sink.rdb.JDBCOptions; import com.dtstack.flink.sql.sink.rdb.AbstractRdbSink; import 
com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; @@ -121,7 +121,7 @@ public String getImpalaJdbcUrl() { } @Override - public AbstractRdbSink genStreamSink(TargetTableInfo targetTableInfo) { + public AbstractRdbSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { super.genStreamSink(targetTableInfo); this.impalaTableInfo = (ImpalaTableInfo) targetTableInfo; return this; diff --git a/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/table/ImpalaSinkParser.java b/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/table/ImpalaSinkParser.java index 4921f5e51..5b6fdeafe 100644 --- a/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/table/ImpalaSinkParser.java +++ b/impala/impala-sink/src/main/java/com/dtstack/flink/sql/sink/impala/table/ImpalaSinkParser.java @@ -19,12 +19,11 @@ package com.dtstack.flink.sql.sink.impala.table; import com.dtstack.flink.sql.sink.rdb.table.RdbSinkParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.math.BigDecimal; import java.sql.Timestamp; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -42,7 +41,7 @@ public class ImpalaSinkParser extends RdbSinkParser { private static final String CURR_TYPE = "impala"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { ImpalaTableInfo impalaTableInfo = new ImpalaTableInfo(); impalaTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, impalaTableInfo); @@ -117,6 +116,8 @@ public Class dbTypeConvertToJavaType(String fieldType) { return String.class; case "timestamp": return Timestamp.class; + default: + break; } throw new RuntimeException("不支持 " + fieldType + " 类型"); diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java index 4fb567f82..c6cce594c 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java @@ -19,8 +19,8 @@ package com.dtstack.flink.sql.sink.kafka.table; import com.dtstack.flink.sql.format.FormatType; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; @@ -32,9 +32,9 @@ * @author DocLi * @modifyer maqi */ -public class KafkaSinkParser extends AbsTableParser { +public class KafkaSinkParser extends AbstractTableParser { @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { KafkaSinkTableInfo kafkaSinkTableInfo = new KafkaSinkTableInfo(); kafkaSinkTableInfo.setName(tableName); kafkaSinkTableInfo.setType(MathUtil.getString(props.get(KafkaSinkTableInfo.TYPE_KEY.toLowerCase()))); 
diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkTableInfo.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkTableInfo.java index 6ac728ee0..ec554d5f1 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkTableInfo.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkTableInfo.java @@ -18,8 +18,7 @@ package com.dtstack.flink.sql.sink.kafka.table; -import com.dtstack.flink.sql.format.FormatType; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import com.google.common.base.Preconditions; import java.util.HashMap; @@ -33,7 +32,7 @@ * @author DocLi * @modifyer maqi */ -public class KafkaSinkTableInfo extends TargetTableInfo { +public class KafkaSinkTableInfo extends AbstractTargetTableInfo { public static final String BOOTSTRAPSERVERS_KEY = "bootstrapServers"; diff --git a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java index 10049cbbe..8b0b9753c 100644 --- a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java +++ b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java @@ -19,8 +19,8 @@ package com.dtstack.flink.sql.source.kafka.table; -import com.dtstack.flink.sql.table.AbsSourceParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractSourceParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; @@ -32,9 +32,9 @@ * * @author sishu.yss */ -public class KafkaSourceParser extends AbsSourceParser { +public class KafkaSourceParser extends AbstractSourceParser { @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) throws Exception { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) throws Exception { KafkaSourceTableInfo kafkaSourceTableInfo = new KafkaSourceTableInfo(); kafkaSourceTableInfo.setName(tableName); @@ -43,7 +43,7 @@ public TableInfo getTableInfo(String tableName, String fieldsInfo, Map, IStreamSinkGener< protected Optional> partitioner; @Override - public KafkaSink genStreamSink(TargetTableInfo targetTableInfo) { + public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { KafkaSinkTableInfo kafkaSinkTableInfo = (KafkaSinkTableInfo) targetTableInfo; this.topic = kafkaSinkTableInfo.getTopic(); diff --git a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java index a445a6529..7e2d77c27 100644 --- a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java +++ b/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.source.IStreamSourceGener; import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo; -import com.dtstack.flink.sql.table.SourceTableInfo; +import com.dtstack.flink.sql.table.AbstractSourceTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; import com.dtstack.flink.sql.util.PluginUtil; import 
org.apache.commons.lang3.StringUtils; @@ -56,7 +56,7 @@ public class KafkaSource implements IStreamSourceGener { */ @SuppressWarnings("rawtypes") @Override - public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { + public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo; String topicName = kafkaSourceTableInfo.getTopic(); @@ -91,7 +91,7 @@ public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnv } else if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) {// {"0":12312,"1":12321,"2":12312} try { Properties properties = PluginUtil.jsonStrToObject(kafkaSourceTableInfo.getOffsetReset(), Properties.class); - Map offsetMap = PluginUtil.ObjectToMap(properties); + Map offsetMap = PluginUtil.objectToMap(properties); Map specificStartupOffsets = new HashMap<>(); for (Map.Entry entry : offsetMap.entrySet()) { specificStartupOffsets.put(new KafkaTopicPartition(topicName, Integer.valueOf(entry.getKey())), Long.valueOf(entry.getValue().toString())); diff --git a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index 6d39536a5..d8c66cbc8 100644 --- a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -68,7 +68,7 @@ public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener< @Override - public KafkaSink genStreamSink(TargetTableInfo targetTableInfo) { + public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { KafkaSinkTableInfo kafka09SinkTableInfo = (KafkaSinkTableInfo) targetTableInfo; this.topic = kafka09SinkTableInfo.getTopic(); diff --git a/kafka09/kafka09-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka09/kafka09-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java index 21ed7c3e3..ffb466a4c 100644 --- a/kafka09/kafka09-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java +++ b/kafka09/kafka09-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java @@ -22,7 +22,7 @@ import com.dtstack.flink.sql.source.IStreamSourceGener; import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo; -import com.dtstack.flink.sql.table.SourceTableInfo; +import com.dtstack.flink.sql.table.AbstractSourceTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; import com.dtstack.flink.sql.util.PluginUtil; import org.apache.commons.lang3.StringUtils; @@ -59,7 +59,7 @@ public class KafkaSource implements IStreamSourceGener
{ */ @SuppressWarnings("rawtypes") @Override - public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { + public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo; String topicName = kafkaSourceTableInfo.getTopic(); @@ -94,7 +94,7 @@ public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnv }else if(DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())){// {"0":12312,"1":12321,"2":12312} try { Properties properties = PluginUtil.jsonStrToObject(kafkaSourceTableInfo.getOffsetReset(), Properties.class); - Map offsetMap = PluginUtil.ObjectToMap(properties); + Map offsetMap = PluginUtil.objectToMap(properties); Map specificStartupOffsets = new HashMap<>(); for(Map.Entry entry:offsetMap.entrySet()){ specificStartupOffsets.put(new KafkaTopicPartition(topicName,Integer.valueOf(entry.getKey())),Long.valueOf(entry.getValue().toString())); diff --git a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index dee05ab08..67df2159c 100644 --- a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -69,7 +69,7 @@ public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener< protected Optional> partitioner; @Override - public KafkaSink genStreamSink(TargetTableInfo targetTableInfo) { + public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { KafkaSinkTableInfo kafka10SinkTableInfo = (KafkaSinkTableInfo) targetTableInfo; this.topic = kafka10SinkTableInfo.getTopic(); diff --git a/kafka10/kafka10-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka10/kafka10-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java index 55cffbf1e..ff4aed89d 100644 --- a/kafka10/kafka10-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java +++ b/kafka10/kafka10-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.source.IStreamSourceGener; import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo; -import com.dtstack.flink.sql.table.SourceTableInfo; +import com.dtstack.flink.sql.table.AbstractSourceTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; import com.dtstack.flink.sql.util.PluginUtil; import org.apache.commons.lang3.StringUtils; @@ -60,7 +60,7 @@ public class KafkaSource implements IStreamSourceGener
{ */ @SuppressWarnings("rawtypes") @Override - public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { + public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo; String topicName = kafkaSourceTableInfo.getTopic(); @@ -94,7 +94,7 @@ public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnv } else if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) {// {"0":12312,"1":12321,"2":12312} try { Properties properties = PluginUtil.jsonStrToObject(kafkaSourceTableInfo.getOffsetReset(), Properties.class); - Map offsetMap = PluginUtil.ObjectToMap(properties); + Map offsetMap = PluginUtil.objectToMap(properties); Map specificStartupOffsets = new HashMap<>(); for (Map.Entry entry : offsetMap.entrySet()) { specificStartupOffsets.put(new KafkaTopicPartition(topicName, Integer.valueOf(entry.getKey())), Long.valueOf(entry.getValue().toString())); diff --git a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index c2ef7d35a..a3533b7aa 100644 --- a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -70,7 +70,7 @@ public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener @Override - public KafkaSink genStreamSink(TargetTableInfo targetTableInfo) { + public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { KafkaSinkTableInfo kafka11SinkTableInfo = (KafkaSinkTableInfo) targetTableInfo; this.topic = kafka11SinkTableInfo.getTopic(); diff --git a/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java index 1a4eed08a..11be1898a 100644 --- a/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java +++ b/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.source.IStreamSourceGener; import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo; -import com.dtstack.flink.sql.table.SourceTableInfo; +import com.dtstack.flink.sql.table.AbstractSourceTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; import com.dtstack.flink.sql.util.PluginUtil; import org.apache.commons.lang3.StringUtils; @@ -60,7 +60,7 @@ public class KafkaSource implements IStreamSourceGener
{ */ @SuppressWarnings("rawtypes") @Override - public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { + public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo; String topicName = kafkaSourceTableInfo.getTopic(); @@ -96,7 +96,7 @@ public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnv } else if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) {// {"0":12312,"1":12321,"2":12312} try { Properties properties = PluginUtil.jsonStrToObject(kafkaSourceTableInfo.getOffsetReset(), Properties.class); - Map offsetMap = PluginUtil.ObjectToMap(properties); + Map offsetMap = PluginUtil.objectToMap(properties); Map specificStartupOffsets = new HashMap<>(); for (Map.Entry entry : offsetMap.entrySet()) { specificStartupOffsets.put(new KafkaTopicPartition(topicName, Integer.valueOf(entry.getKey())), Long.valueOf(entry.getValue().toString())); diff --git a/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java b/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java index cf2c70f1f..45b312ffa 100644 --- a/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java +++ b/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java @@ -1,10 +1,10 @@ package com.dtstack.flink.sql.side.kudu; -import com.dtstack.flink.sql.side.AllReqRow; +import com.dtstack.flink.sql.side.BaseAllReqRow; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.PredicateInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.kudu.table.KuduSideTableInfo; import com.dtstack.flink.sql.side.kudu.utils.KuduUtil; import org.apache.calcite.sql.JoinType; @@ -39,7 +39,7 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicReference; -public class KuduAllReqRow extends AllReqRow { +public class KuduAllReqRow extends BaseAllReqRow { private static final long serialVersionUID = 6051774809356082219L; @@ -60,7 +60,7 @@ public class KuduAllReqRow extends AllReqRow { private AtomicReference>>> cacheRef = new AtomicReference<>(); - public KuduAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public KuduAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new KuduAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllSideInfo.java b/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllSideInfo.java index ddfa73201..1241a6f37 100644 --- a/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllSideInfo.java +++ b/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllSideInfo.java @@ -2,8 +2,8 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; +import 
com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.util.ParseUtils; import org.apache.calcite.sql.SqlNode; import org.apache.commons.collections.CollectionUtils; @@ -12,16 +12,16 @@ import java.util.List; -public class KuduAllSideInfo extends SideInfo { +public class KuduAllSideInfo extends BaseSideInfo { private static final long serialVersionUID = 9005389633060174746L; - public KuduAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public KuduAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { // no use } diff --git a/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java b/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java index 12689a07b..1cdc75e1b 100644 --- a/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java +++ b/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java @@ -29,7 +29,7 @@ import java.util.List; import java.util.Map; -public class KuduAsyncReqRow extends AsyncReqRow { +public class KuduAsyncReqRow extends BaseAsyncReqRow { private static final Logger LOG = LoggerFactory.getLogger(KuduAsyncReqRow.class); /** @@ -52,7 +52,7 @@ public class KuduAsyncReqRow extends AsyncReqRow { private AsyncKuduScanner.AsyncKuduScannerBuilder scannerBuilder; - public KuduAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public KuduAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new KuduAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncSideInfo.java b/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncSideInfo.java index de75f23fb..5ace515f7 100644 --- a/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncSideInfo.java +++ b/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncSideInfo.java @@ -2,8 +2,8 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import org.apache.calcite.sql.SqlBasicCall; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlKind; @@ -12,15 +12,15 @@ import java.util.List; -public class KuduAsyncSideInfo extends SideInfo { +public class KuduAsyncSideInfo extends BaseSideInfo { - public KuduAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public KuduAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + 
public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { } @Override diff --git a/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideParser.java b/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideParser.java index 83864fa22..62d215d87 100644 --- a/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideParser.java +++ b/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideParser.java @@ -1,7 +1,7 @@ package com.dtstack.flink.sql.side.kudu.table; -import com.dtstack.flink.sql.table.AbsSideTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractSideTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.math.BigDecimal; @@ -9,9 +9,9 @@ import java.sql.Timestamp; import java.util.Map; -import static com.dtstack.flink.sql.table.TableInfo.PARALLELISM_KEY; +import static com.dtstack.flink.sql.table.AbstractTableInfo.PARALLELISM_KEY; -public class KuduSideParser extends AbsSideTableParser { +public class KuduSideParser extends AbstractSideTableParser { public static final String KUDU_MASTERS = "kuduMasters"; @@ -52,7 +52,7 @@ public class KuduSideParser extends AbsSideTableParser { @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { KuduSideTableInfo kuduSideTableInfo = new KuduSideTableInfo(); kuduSideTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, kuduSideTableInfo); diff --git a/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideTableInfo.java b/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideTableInfo.java index c527ec268..eb8df5ad9 100644 --- a/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideTableInfo.java +++ b/kudu/kudu-side/kudu-side-core/src/main/java/com/dtstack/flink/sql/side/kudu/table/KuduSideTableInfo.java @@ -1,9 +1,9 @@ package com.dtstack.flink.sql.side.kudu.table; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.google.common.base.Preconditions; -public class KuduSideTableInfo extends SideTableInfo { +public class KuduSideTableInfo extends AbstractSideTableInfo { private static final String CURR_TYPE = "kudu"; diff --git a/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/KuduOutputFormat.java b/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/KuduOutputFormat.java index 780319d82..c211c7a24 100644 --- a/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/KuduOutputFormat.java +++ b/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/KuduOutputFormat.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.kudu; -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; +import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.configuration.Configuration; @@ -42,7 +42,7 @@ * @author gituser * @modify xiuzhu */ -public class KuduOutputFormat extends DtRichOutputFormat { +public class KuduOutputFormat extends AbstractDtRichOutputFormat { private static final long 
serialVersionUID = 1L; diff --git a/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/KuduSink.java b/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/KuduSink.java index 0c90ffe94..4c7c3bea8 100644 --- a/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/KuduSink.java +++ b/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/KuduSink.java @@ -3,7 +3,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kudu.table.KuduTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -39,7 +39,7 @@ public class KuduSink implements RetractStreamTableSink, Serializable, IStr private int parallelism = -1; @Override - public KuduSink genStreamSink(TargetTableInfo targetTableInfo) { + public KuduSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { KuduTableInfo kuduTableInfo = (KuduTableInfo) targetTableInfo; this.kuduMasters = kuduTableInfo.getKuduMasters(); this.tableName = kuduTableInfo.getTableName(); diff --git a/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduSinkParser.java b/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduSinkParser.java index e38a85390..20302d44f 100644 --- a/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduSinkParser.java +++ b/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduSinkParser.java @@ -1,8 +1,8 @@ package com.dtstack.flink.sql.sink.kudu.table; import com.dtstack.flink.sql.sink.kudu.KuduOutputFormat; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.math.BigDecimal; @@ -10,9 +10,9 @@ import java.sql.Timestamp; import java.util.Map; -import static com.dtstack.flink.sql.table.TableInfo.PARALLELISM_KEY; +import static com.dtstack.flink.sql.table.AbstractTableInfo.PARALLELISM_KEY; -public class KuduSinkParser extends AbsTableParser { +public class KuduSinkParser extends AbstractTableParser { public static final String KUDU_MASTERS = "kuduMasters"; @@ -27,7 +27,7 @@ public class KuduSinkParser extends AbsTableParser { public static final String SOCKET_READ_TIMEOUT_MS = "defaultSocketReadTimeoutMs"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { KuduTableInfo kuduTableInfo = new KuduTableInfo(); kuduTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, kuduTableInfo); diff --git a/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduTableInfo.java b/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduTableInfo.java index c086c7298..80e47761b 100644 --- a/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduTableInfo.java +++ b/kudu/kudu-sink/src/main/java/com/dtstack/flink/sql/sink/kudu/table/KuduTableInfo.java @@ -1,10 +1,10 @@ package com.dtstack.flink.sql.sink.kudu.table; import com.dtstack.flink.sql.sink.kudu.KuduOutputFormat; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import 
com.google.common.base.Preconditions; -public class KuduTableInfo extends TargetTableInfo { +public class KuduTableInfo extends AbstractTargetTableInfo { private static final String CURR_TYPE = "kudu"; diff --git a/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java b/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java index 3229101c6..79f5bd9ca 100644 --- a/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java +++ b/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java @@ -18,10 +18,10 @@ package com.dtstack.flink.sql.side.mongo; -import com.dtstack.flink.sql.side.AllReqRow; +import com.dtstack.flink.sql.side.BaseAllReqRow; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.mongo.table.MongoSideTableInfo; import com.dtstack.flink.sql.side.mongo.utils.MongoUtil; import com.mongodb.BasicDBObject; @@ -57,7 +57,7 @@ * * @author xuqianjin */ -public class MongoAllReqRow extends AllReqRow { +public class MongoAllReqRow extends BaseAllReqRow { private static final long serialVersionUID = -675332795591842778L; @@ -73,7 +73,7 @@ public class MongoAllReqRow extends AllReqRow { private AtomicReference>>> cacheRef = new AtomicReference<>(); - public MongoAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public MongoAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new MongoAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllSideInfo.java b/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllSideInfo.java index b2d5c2686..4a33f1a69 100644 --- a/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllSideInfo.java +++ b/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllSideInfo.java @@ -20,8 +20,8 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.util.ParseUtils; import org.apache.calcite.sql.SqlNode; import org.apache.commons.collections.CollectionUtils; @@ -36,16 +36,16 @@ * * @author xuqianjin */ -public class MongoAllSideInfo extends SideInfo{ +public class MongoAllSideInfo extends BaseSideInfo { private static final long serialVersionUID = -1696860430075523841L; - public MongoAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public MongoAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { } @Override diff --git 
a/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncReqRow.java b/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncReqRow.java index 5ef06c80e..491f62922 100644 --- a/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncReqRow.java +++ b/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncReqRow.java @@ -20,27 +20,22 @@ package com.dtstack.flink.sql.side.mongo; import com.dtstack.flink.sql.enums.ECacheContentType; -import com.dtstack.flink.sql.side.AsyncReqRow; +import com.dtstack.flink.sql.side.BaseAsyncReqRow; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.cache.CacheObj; import com.dtstack.flink.sql.side.mongo.table.MongoSideTableInfo; import com.dtstack.flink.sql.side.mongo.utils.MongoUtil; import com.mongodb.BasicDBObject; import com.mongodb.Block; import com.mongodb.ConnectionString; -import com.mongodb.MongoCredential; -import com.mongodb.ServerAddress; import com.mongodb.async.SingleResultCallback; import com.mongodb.async.client.MongoClient; import com.mongodb.MongoClientSettings; import com.mongodb.async.client.MongoClients; import com.mongodb.async.client.MongoCollection; import com.mongodb.async.client.MongoDatabase; -import com.mongodb.connection.ClusterSettings; -import com.mongodb.connection.ConnectionPoolSettings; -import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.java.typeutils.RowTypeInfo; import com.google.common.collect.Lists; import org.apache.flink.configuration.Configuration; @@ -53,7 +48,6 @@ import org.slf4j.LoggerFactory; import java.sql.Timestamp; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -66,7 +60,7 @@ * * @author xuqianjin */ -public class MongoAsyncReqRow extends AsyncReqRow { +public class MongoAsyncReqRow extends BaseAsyncReqRow { private static final long serialVersionUID = -1183158242862673706L; private static final Logger LOG = LoggerFactory.getLogger(MongoAsyncReqRow.class); @@ -77,7 +71,7 @@ public class MongoAsyncReqRow extends AsyncReqRow { private MongoSideTableInfo mongoSideTableInfo; - public MongoAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public MongoAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new MongoAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncSideInfo.java b/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncSideInfo.java index de81ed1f1..19534d4e1 100644 --- a/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncSideInfo.java +++ b/mongo/mongo-side/mongo-async-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAsyncSideInfo.java @@ -20,16 +20,13 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; -import com.dtstack.flink.sql.side.mongo.table.MongoSideTableInfo; -import com.dtstack.flink.sql.util.ParseUtils; +import 
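The import changes above drop ServerAddress/MongoCredential from the async dimension-table lookup and build the client from the configured address URI instead. A minimal sketch of that connection path, assuming the same 3.x async driver classes the imports reference; the wrapper class and method name are illustrative:

import com.mongodb.ConnectionString;
import com.mongodb.MongoClientSettings;
import com.mongodb.async.client.MongoClient;
import com.mongodb.async.client.MongoClients;

public class MongoAsyncConnectSketch {

    // address is a full URI, e.g. mongodb://user:pwd@host1:27017,host2:27017/db
    static MongoClient connect(String address) {
        ConnectionString connectionString = new ConnectionString(address);
        MongoClientSettings settings = MongoClientSettings.builder()
                .applyConnectionString(connectionString)
                .build();
        return MongoClients.create(settings);
    }
}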
com.dtstack.flink.sql.side.BaseSideInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import org.apache.calcite.sql.SqlBasicCall; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlNode; import org.apache.flink.api.java.typeutils.RowTypeInfo; -import com.google.common.collect.Lists; import java.util.List; @@ -39,16 +36,16 @@ * * @author xuqianjin */ -public class MongoAsyncSideInfo extends SideInfo { +public class MongoAsyncSideInfo extends BaseSideInfo { private static final long serialVersionUID = -3694857194254465989L; - public MongoAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public MongoAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { } diff --git a/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/table/MongoSideParser.java b/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/table/MongoSideParser.java index 5edc12419..4fe1ebee1 100644 --- a/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/table/MongoSideParser.java +++ b/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/table/MongoSideParser.java @@ -19,15 +19,13 @@ package com.dtstack.flink.sql.side.mongo.table; -import com.dtstack.flink.sql.table.AbsSideTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractSideTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import static com.dtstack.flink.sql.table.TableInfo.PARALLELISM_KEY; +import static com.dtstack.flink.sql.table.AbstractTableInfo.PARALLELISM_KEY; /** * Reason: @@ -35,7 +33,7 @@ * * @author xuqianjin */ -public class MongoSideParser extends AbsSideTableParser { +public class MongoSideParser extends AbstractSideTableParser { public static final String ADDRESS_KEY = "address"; @@ -48,7 +46,7 @@ public class MongoSideParser extends AbsSideTableParser { public static final String DATABASE_KEY = "database"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { MongoSideTableInfo mongoSideTableInfo = new MongoSideTableInfo(); mongoSideTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, mongoSideTableInfo); diff --git a/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/table/MongoSideTableInfo.java b/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/table/MongoSideTableInfo.java index a5c834469..c83de7bbc 100644 --- a/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/table/MongoSideTableInfo.java +++ b/mongo/mongo-side/mongo-side-core/src/main/java/com/dtstack/flink/sql/side/mongo/table/MongoSideTableInfo.java @@ -19,7 +19,7 @@ package com.dtstack.flink.sql.side.mongo.table; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import 
com.google.common.base.Preconditions; /** @@ -28,7 +28,7 @@ * * @author xuqianjin */ -public class MongoSideTableInfo extends SideTableInfo { +public class MongoSideTableInfo extends AbstractSideTableInfo { private static final long serialVersionUID = -1L; diff --git a/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoOutputFormat.java b/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoOutputFormat.java index daaded15e..03ef8f3b3 100644 --- a/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoOutputFormat.java +++ b/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoOutputFormat.java @@ -19,7 +19,7 @@ package com.dtstack.flink.sql.sink.mongo; -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; +import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; import com.mongodb.MongoClient; import com.mongodb.MongoClientURI; import com.mongodb.client.MongoCollection; @@ -41,7 +41,7 @@ * * @author xuqianjin */ -public class MongoOutputFormat extends DtRichOutputFormat { +public class MongoOutputFormat extends AbstractDtRichOutputFormat { private static final Logger LOG = LoggerFactory.getLogger(MongoOutputFormat.class); private String address; diff --git a/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoSink.java b/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoSink.java index 4e28d8fd2..3f34055ec 100644 --- a/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoSink.java +++ b/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/MongoSink.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.mongo.table.MongoTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -54,7 +54,7 @@ public MongoSink() { } @Override - public MongoSink genStreamSink(TargetTableInfo targetTableInfo) { + public MongoSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { MongoTableInfo mongoTableInfo = (MongoTableInfo) targetTableInfo; this.address = mongoTableInfo.getAddress(); this.tableName = mongoTableInfo.getTableName(); diff --git a/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/table/MongoSinkParser.java b/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/table/MongoSinkParser.java index d4810fb6d..f093b70aa 100644 --- a/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/table/MongoSinkParser.java +++ b/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/table/MongoSinkParser.java @@ -19,13 +19,13 @@ package com.dtstack.flink.sql.sink.mongo.table; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; -import static com.dtstack.flink.sql.table.TableInfo.PARALLELISM_KEY; +import static com.dtstack.flink.sql.table.AbstractTableInfo.PARALLELISM_KEY; /** * Reason: @@ -34,7 +34,7 @@ * @author xuqianjin */ -public class MongoSinkParser extends AbsTableParser { +public class MongoSinkParser extends AbstractTableParser { private static final String CURR_TYPE = "mongo"; @@ -49,7 +49,7 @@ public 
class MongoSinkParser extends AbsTableParser { public static final String DATABASE_KEY = "database"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { MongoTableInfo mongoTableInfo = new MongoTableInfo(); mongoTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, mongoTableInfo); diff --git a/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/table/MongoTableInfo.java b/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/table/MongoTableInfo.java index 02a96d6bb..c9d94dfe6 100644 --- a/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/table/MongoTableInfo.java +++ b/mongo/mongo-sink/src/main/java/com/dtstack/flink/sql/sink/mongo/table/MongoTableInfo.java @@ -19,7 +19,7 @@ package com.dtstack.flink.sql.sink.mongo.table; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import com.google.common.base.Preconditions; /** @@ -30,7 +30,7 @@ */ -public class MongoTableInfo extends TargetTableInfo { +public class MongoTableInfo extends AbstractTargetTableInfo { private static final String CURR_TYPE = "mongo"; diff --git a/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllReqRow.java b/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllReqRow.java index 8cf6ab597..eb294e64e 100644 --- a/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllReqRow.java +++ b/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllReqRow.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.DtStringUtil; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -49,7 +49,7 @@ public class MysqlAllReqRow extends AbstractRdbAllReqRow { private static final String MYSQL_DRIVER = "com.mysql.jdbc.Driver"; - public MysqlAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public MysqlAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new MysqlAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllSideInfo.java b/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllSideInfo.java index 81193c0f9..89a62a5ef 100644 --- a/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllSideInfo.java +++ b/mysql/mysql-side/mysql-all-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAllSideInfo.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.RdbAllSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -35,7 +35,7 @@ */ public class MysqlAllSideInfo extends RdbAllSideInfo { - public MysqlAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, 
SideTableInfo sideTableInfo) { + public MysqlAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncReqRow.java b/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncReqRow.java index 2d2f5a72a..290f02e2b 100644 --- a/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncReqRow.java +++ b/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncReqRow.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncReqRow; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import io.vertx.core.Vertx; @@ -49,7 +49,7 @@ public class MysqlAsyncReqRow extends RdbAsyncReqRow { private final static String MYSQL_DRIVER = "com.mysql.jdbc.Driver"; - public MysqlAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public MysqlAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new MysqlAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncSideInfo.java b/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncSideInfo.java index f72671ce0..8fe10d06d 100644 --- a/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncSideInfo.java +++ b/mysql/mysql-side/mysql-async-side/src/main/java/com/dtstack/flink/sql/side/mysql/MysqlAsyncSideInfo.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -36,7 +36,7 @@ public class MysqlAsyncSideInfo extends RdbAsyncSideInfo { - public MysqlAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public MysqlAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/mysql/mysql-side/mysql-side-core/src/main/java/com/dtstack/flink/sql/side/mysql/table/MysqlSideParser.java b/mysql/mysql-side/mysql-side-core/src/main/java/com/dtstack/flink/sql/side/mysql/table/MysqlSideParser.java index 40f68e7e4..b0a38e344 100644 --- a/mysql/mysql-side/mysql-side-core/src/main/java/com/dtstack/flink/sql/side/mysql/table/MysqlSideParser.java +++ b/mysql/mysql-side/mysql-side-core/src/main/java/com/dtstack/flink/sql/side/mysql/table/MysqlSideParser.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.side.mysql.table; import com.dtstack.flink.sql.side.rdb.table.RdbSideParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -37,8 +37,8 @@ public class 
MysqlSideParser extends RdbSideParser { private static final String CURR_TYPE = "mysql"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo mysqlTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo mysqlTableInfo = super.getTableInfo(tableName, fieldsInfo, props); mysqlTableInfo.setType(CURR_TYPE); return mysqlTableInfo; } diff --git a/mysql/mysql-sink/src/main/java/com/dtstack/flink/sql/sink/mysql/table/MysqlSinkParser.java b/mysql/mysql-sink/src/main/java/com/dtstack/flink/sql/sink/mysql/table/MysqlSinkParser.java index 2247eb8cc..49105a7a8 100644 --- a/mysql/mysql-sink/src/main/java/com/dtstack/flink/sql/sink/mysql/table/MysqlSinkParser.java +++ b/mysql/mysql-sink/src/main/java/com/dtstack/flink/sql/sink/mysql/table/MysqlSinkParser.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.sink.mysql.table; import com.dtstack.flink.sql.sink.rdb.table.RdbSinkParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -36,8 +36,8 @@ public class MysqlSinkParser extends RdbSinkParser { private static final String CURR_TYPE = "mysql"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo mysqlTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo mysqlTableInfo = super.getTableInfo(tableName, fieldsInfo, props); mysqlTableInfo.setType(CURR_TYPE); return mysqlTableInfo; } diff --git a/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllReqRow.java b/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllReqRow.java index 5477514a5..3be6687ec 100644 --- a/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllReqRow.java +++ b/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllReqRow.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.DtStringUtil; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -42,7 +42,7 @@ public class OracleAllReqRow extends AbstractRdbAllReqRow { private static final String ORACLE_DRIVER = "oracle.jdbc.driver.OracleDriver"; - public OracleAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public OracleAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new OracleAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllSideInfo.java b/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllSideInfo.java index 361366929..342533681 100644 --- a/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllSideInfo.java +++ b/oracle/oracle-side/oracle-all-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAllSideInfo.java @@ -19,18 
+19,17 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.RdbAllSideInfo; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; -import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.java.typeutils.RowTypeInfo; import java.util.List; public class OracleAllSideInfo extends RdbAllSideInfo { - public OracleAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public OracleAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } diff --git a/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java b/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java index f6827f41b..c0b37e7ac 100644 --- a/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java +++ b/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncReqRow; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import io.vertx.core.Vertx; @@ -42,7 +42,7 @@ public class OracleAsyncReqRow extends RdbAsyncReqRow { private static final String ORACLE_DRIVER = "oracle.jdbc.driver.OracleDriver"; - public OracleAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public OracleAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new OracleAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncSideInfo.java b/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncSideInfo.java index b811cf783..8c620b013 100644 --- a/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncSideInfo.java +++ b/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncSideInfo.java @@ -20,22 +20,18 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncSideInfo; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; -import com.dtstack.flink.sql.util.ParseUtils; -import org.apache.calcite.sql.SqlNode; import org.apache.flink.api.java.typeutils.RowTypeInfo; -import com.google.common.collect.Lists; -import java.util.Arrays; import java.util.List; public class OracleAsyncSideInfo extends RdbAsyncSideInfo { - public OracleAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + 
public OracleAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } diff --git a/oracle/oracle-side/oracle-side-core/src/main/java/com/dtstack/flink/sql/side/oracle/table/OracleSideParser.java b/oracle/oracle-side/oracle-side-core/src/main/java/com/dtstack/flink/sql/side/oracle/table/OracleSideParser.java index f9124bd10..897b77d30 100644 --- a/oracle/oracle-side/oracle-side-core/src/main/java/com/dtstack/flink/sql/side/oracle/table/OracleSideParser.java +++ b/oracle/oracle-side/oracle-side-core/src/main/java/com/dtstack/flink/sql/side/oracle/table/OracleSideParser.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.side.oracle.table; import com.dtstack.flink.sql.side.rdb.table.RdbSideParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -27,8 +27,8 @@ public class OracleSideParser extends RdbSideParser { private static final String CURR_TYPE = "oracle"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo oracleTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo oracleTableInfo = super.getTableInfo(tableName, fieldsInfo, props); oracleTableInfo.setType(CURR_TYPE); return oracleTableInfo; } diff --git a/oracle/oracle-sink/src/main/java/com/dtstack/flink/sql/sink/oracle/table/OracleSinkParser.java b/oracle/oracle-sink/src/main/java/com/dtstack/flink/sql/sink/oracle/table/OracleSinkParser.java index aff096bd3..1732c8cc2 100644 --- a/oracle/oracle-sink/src/main/java/com/dtstack/flink/sql/sink/oracle/table/OracleSinkParser.java +++ b/oracle/oracle-sink/src/main/java/com/dtstack/flink/sql/sink/oracle/table/OracleSinkParser.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.oracle.table; import com.dtstack.flink.sql.sink.rdb.table.RdbSinkParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -33,8 +33,8 @@ public class OracleSinkParser extends RdbSinkParser { private static final String CURR_TYPE = "oracle"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo oracleTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo oracleTableInfo = super.getTableInfo(tableName, fieldsInfo, props); oracleTableInfo.setType(CURR_TYPE); return oracleTableInfo; } diff --git a/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllReqRow.java b/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllReqRow.java index a5eec511b..5fbf0824b 100644 --- a/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllReqRow.java +++ b/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllReqRow.java @@ -19,7 +19,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.DtStringUtil; import 
com.google.common.collect.Maps; @@ -45,7 +45,7 @@ public class PolardbAllReqRow extends AbstractRdbAllReqRow { private static final String POLARDB_DRIVER = "com.mysql.cj.jdbc.Driver"; - public PolardbAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public PolardbAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new PolardbAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllSideInfo.java b/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllSideInfo.java index fa2ad63e7..8f30c8dd3 100644 --- a/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllSideInfo.java +++ b/polardb/polardb-side/polardb-all-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAllSideInfo.java @@ -19,7 +19,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.RdbAllSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -31,7 +31,7 @@ * @author yinxi */ public class PolardbAllSideInfo extends RdbAllSideInfo { - public PolardbAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public PolardbAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java b/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java index 0f8f82738..286f17286 100644 --- a/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java +++ b/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java @@ -19,7 +19,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncReqRow; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import io.vertx.core.Vertx; @@ -44,7 +44,7 @@ public class PolardbAsyncReqRow extends RdbAsyncReqRow { private final static String POLARDB_DRIVER = "com.mysql.cj.jdbc.Driver"; - public PolardbAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public PolardbAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new PolardbAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncSideInfo.java b/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncSideInfo.java index f6afb8da8..b52d52390 100644 --- 
a/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncSideInfo.java +++ b/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncSideInfo.java @@ -19,7 +19,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -32,7 +32,7 @@ */ public class PolardbAsyncSideInfo extends RdbAsyncSideInfo { - public PolardbAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public PolardbAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/polardb/polardb-side/polardb-side-core/src/main/java/com/dtstack/flink/sql/side/polardb/table/PolardbSideParser.java b/polardb/polardb-side/polardb-side-core/src/main/java/com/dtstack/flink/sql/side/polardb/table/PolardbSideParser.java index 1afb661f0..71aad7cbb 100644 --- a/polardb/polardb-side/polardb-side-core/src/main/java/com/dtstack/flink/sql/side/polardb/table/PolardbSideParser.java +++ b/polardb/polardb-side/polardb-side-core/src/main/java/com/dtstack/flink/sql/side/polardb/table/PolardbSideParser.java @@ -19,7 +19,7 @@ package com.dtstack.flink.sql.side.polardb.table; import com.dtstack.flink.sql.side.rdb.table.RdbSideParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -32,8 +32,8 @@ public class PolardbSideParser extends RdbSideParser { private static final String CURR_TYPE = "polardb"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo mysqlTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo mysqlTableInfo = super.getTableInfo(tableName, fieldsInfo, props); mysqlTableInfo.setType(CURR_TYPE); return mysqlTableInfo; } diff --git a/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/table/PolardbSinkParser.java b/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/table/PolardbSinkParser.java index a57089941..b4f02665e 100644 --- a/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/table/PolardbSinkParser.java +++ b/polardb/polardb-sink/src/main/java/com/dtstack/flink/sql/sink/polardb/table/PolardbSinkParser.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.polardb.table; import com.dtstack.flink.sql.sink.rdb.table.RdbSinkParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -31,8 +31,8 @@ public class PolardbSinkParser extends RdbSinkParser { private static final String CURR_TYPE = "polardb"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo polardbTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo polardbTableInfo = super.getTableInfo(tableName, fieldsInfo, props); polardbTableInfo.setType(CURR_TYPE); return polardbTableInfo; } diff --git 
a/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllReqRow.java b/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllReqRow.java index 1c9aecfe1..0ec8f96ce 100644 --- a/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllReqRow.java +++ b/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllReqRow.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.DtStringUtil; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -49,7 +49,7 @@ public class PostgresqlAllReqRow extends AbstractRdbAllReqRow { private static final String POSTGRESQL_DRIVER = "org.postgresql.Driver"; - public PostgresqlAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public PostgresqlAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new PostgresqlAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllSideInfo.java b/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllSideInfo.java index d383ee46d..a4a52eabc 100644 --- a/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllSideInfo.java +++ b/postgresql/postgresql-side/postgresql-all-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAllSideInfo.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.RdbAllSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -35,7 +35,7 @@ */ public class PostgresqlAllSideInfo extends RdbAllSideInfo { - public PostgresqlAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public PostgresqlAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java b/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java index a26c8b229..2a9e0137c 100644 --- a/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java +++ b/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java @@ -21,7 +21,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import 
com.dtstack.flink.sql.side.rdb.async.RdbAsyncReqRow; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import io.vertx.core.Vertx; @@ -48,7 +48,7 @@ public class PostgresqlAsyncReqRow extends RdbAsyncReqRow { private final static String POSTGRESQL_DRIVER = "org.postgresql.Driver"; - public PostgresqlAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public PostgresqlAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new PostgresqlAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncSideInfo.java b/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncSideInfo.java index 1d89f4894..f19a488bc 100644 --- a/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncSideInfo.java +++ b/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncSideInfo.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -36,7 +36,7 @@ public class PostgresqlAsyncSideInfo extends RdbAsyncSideInfo { - public PostgresqlAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public PostgresqlAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/postgresql/postgresql-side/postgresql-side-core/src/main/java/com/dtstack/flink/sql/side/postgresql/table/PostgresqlSideParser.java b/postgresql/postgresql-side/postgresql-side-core/src/main/java/com/dtstack/flink/sql/side/postgresql/table/PostgresqlSideParser.java index faee2c704..be0c5ea8b 100644 --- a/postgresql/postgresql-side/postgresql-side-core/src/main/java/com/dtstack/flink/sql/side/postgresql/table/PostgresqlSideParser.java +++ b/postgresql/postgresql-side/postgresql-side-core/src/main/java/com/dtstack/flink/sql/side/postgresql/table/PostgresqlSideParser.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.side.postgresql.table; import com.dtstack.flink.sql.side.rdb.table.RdbSideParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -37,8 +37,8 @@ public class PostgresqlSideParser extends RdbSideParser { private static final String CURR_TYPE = "postgresql"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo pgTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo pgTableInfo = super.getTableInfo(tableName, fieldsInfo, props); pgTableInfo.setType(CURR_TYPE); return pgTableInfo; } diff --git a/postgresql/postgresql-sink/src/main/java/com/dtstack/flink/sql/sink/postgresql/table/PostgresqlSinkParser.java 
b/postgresql/postgresql-sink/src/main/java/com/dtstack/flink/sql/sink/postgresql/table/PostgresqlSinkParser.java index 5943b5e52..e94cfc8a0 100644 --- a/postgresql/postgresql-sink/src/main/java/com/dtstack/flink/sql/sink/postgresql/table/PostgresqlSinkParser.java +++ b/postgresql/postgresql-sink/src/main/java/com/dtstack/flink/sql/sink/postgresql/table/PostgresqlSinkParser.java @@ -20,7 +20,7 @@ package com.dtstack.flink.sql.sink.postgresql.table; import com.dtstack.flink.sql.sink.rdb.table.RdbSinkParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -35,8 +35,8 @@ public class PostgresqlSinkParser extends RdbSinkParser { private static final String CURR_TYPE = "postgresql"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo pgTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo pgTableInfo = super.getTableInfo(tableName, fieldsInfo, props); pgTableInfo.setType(CURR_TYPE); return pgTableInfo; } diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java index aeb88f7a3..e2452f73b 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java @@ -18,8 +18,8 @@ package com.dtstack.flink.sql.side.rdb.all; -import com.dtstack.flink.sql.side.AllReqRow; -import com.dtstack.flink.sql.side.SideInfo; +import com.dtstack.flink.sql.side.BaseAllReqRow; +import com.dtstack.flink.sql.side.BaseSideInfo; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import com.dtstack.flink.sql.side.rdb.util.SwitchUtil; import org.apache.calcite.sql.JoinType; @@ -51,7 +51,7 @@ * @author maqi */ -public abstract class AbstractRdbAllReqRow extends AllReqRow { +public abstract class AbstractRdbAllReqRow extends BaseAllReqRow { private static final long serialVersionUID = 2098635140857937718L; @@ -61,7 +61,7 @@ public abstract class AbstractRdbAllReqRow extends AllReqRow { private AtomicReference>>> cacheRef = new AtomicReference<>(); - public AbstractRdbAllReqRow(SideInfo sideInfo) { + public AbstractRdbAllReqRow(BaseSideInfo sideInfo) { super(sideInfo); } diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllSideInfo.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllSideInfo.java index 619d08529..1751b12aa 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllSideInfo.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllSideInfo.java @@ -21,8 +21,8 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.PredicateInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import com.dtstack.flink.sql.util.ParseUtils; import org.apache.calcite.sql.SqlNode; @@ -43,17 +43,17 @@ * @author maqi */ -public class RdbAllSideInfo extends SideInfo { +public class RdbAllSideInfo extends BaseSideInfo { private static final long 
serialVersionUID = -5858335638589472159L; - public RdbAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public RdbAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideTableInfo; sqlCondition = getSelectFromStatement(getTableName(rdbSideTableInfo), Arrays.asList(sideSelectFields.split(",")), sideTableInfo.getPredicateInfoes()); System.out.println("--------dimension sql query-------\n" + sqlCondition); diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java index bf7f8f774..f78c30c30 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java @@ -20,9 +20,9 @@ package com.dtstack.flink.sql.side.rdb.async; import com.dtstack.flink.sql.enums.ECacheContentType; -import com.dtstack.flink.sql.side.AsyncReqRow; +import com.dtstack.flink.sql.side.BaseAsyncReqRow; import com.dtstack.flink.sql.side.CacheMissVal; -import com.dtstack.flink.sql.side.SideInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; import com.dtstack.flink.sql.side.cache.CacheObj; import com.dtstack.flink.sql.side.rdb.util.SwitchUtil; import io.vertx.core.json.JsonArray; @@ -47,7 +47,7 @@ * @author maqi */ -public class RdbAsyncReqRow extends AsyncReqRow { +public class RdbAsyncReqRow extends BaseAsyncReqRow { private static final long serialVersionUID = 2098635244857937720L; @@ -69,7 +69,7 @@ public class RdbAsyncReqRow extends AsyncReqRow { private transient SQLClient rdbSqlClient; - public RdbAsyncReqRow(SideInfo sideInfo) { + public RdbAsyncReqRow(BaseSideInfo sideInfo) { super(sideInfo); } diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncSideInfo.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncSideInfo.java index 0fe3eb0b9..d79d27460 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncSideInfo.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncSideInfo.java @@ -21,8 +21,8 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.PredicateInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import com.dtstack.flink.sql.util.ParseUtils; import org.apache.calcite.sql.SqlBasicCall; @@ -34,7 +34,6 @@ import java.util.Arrays; import java.util.List; -import java.util.Map; import java.util.stream.Collectors; @@ -46,16 +45,16 @@ * @author maqi */ -public class RdbAsyncSideInfo extends SideInfo { +public class RdbAsyncSideInfo extends BaseSideInfo { private static final long serialVersionUID = 1942629132469918611L; - public RdbAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public 
RdbAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideTableInfo; String sideTableName = joinInfo.getSideTableName(); diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideParser.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideParser.java index b9811b0ee..79942ae11 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideParser.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideParser.java @@ -19,8 +19,8 @@ package com.dtstack.flink.sql.side.rdb.table; -import com.dtstack.flink.sql.table.AbsSideTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractSideTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; @@ -33,10 +33,10 @@ * @author maqi */ -public class RdbSideParser extends AbsSideTableParser { +public class RdbSideParser extends AbstractSideTableParser { @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { RdbSideTableInfo rdbTableInfo = new RdbSideTableInfo(); rdbTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, rdbTableInfo); diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideTableInfo.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideTableInfo.java index 0dfbef325..506033007 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideTableInfo.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideTableInfo.java @@ -17,7 +17,7 @@ */ package com.dtstack.flink.sql.side.rdb.table; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.google.common.base.Preconditions; /** @@ -27,7 +27,7 @@ * * @author maqi */ -public class RdbSideTableInfo extends SideTableInfo { +public class RdbSideTableInfo extends AbstractSideTableInfo { private static final long serialVersionUID = -1L; diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/AbstractRdbSink.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/AbstractRdbSink.java index b7990f9f4..bdc7e3dbf 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/AbstractRdbSink.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/AbstractRdbSink.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.rdb.format.JDBCUpsertOutputFormat; import com.dtstack.flink.sql.sink.rdb.table.RdbTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -85,7 +85,7 @@ public AbstractRdbSink(JDBCDialect jdbcDialect) { } @Override - public AbstractRdbSink genStreamSink(TargetTableInfo targetTableInfo) { + public 
AbstractRdbSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { RdbTableInfo rdbTableInfo = (RdbTableInfo) targetTableInfo; this.batchNum = rdbTableInfo.getBatchSize() == null ? batchNum : rdbTableInfo.getBatchSize(); this.batchWaitInterval = rdbTableInfo.getBatchWaitInterval() == null ? diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/AbstractJDBCOutputFormat.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/AbstractJDBCOutputFormat.java index 3378ff3f2..2a3ce5e90 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/AbstractJDBCOutputFormat.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/AbstractJDBCOutputFormat.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.rdb.format; -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; +import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; import com.dtstack.flink.sql.util.JDBCUtils; import org.apache.flink.configuration.Configuration; import org.apache.flink.types.Row; @@ -36,7 +36,7 @@ * @see Row * @see DriverManager */ -public abstract class AbstractJDBCOutputFormat extends DtRichOutputFormat { +public abstract class AbstractJDBCOutputFormat extends AbstractDtRichOutputFormat { private static final long serialVersionUID = 1L; public static final int DEFAULT_FLUSH_MAX_SIZE = 100; diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/table/RdbSinkParser.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/table/RdbSinkParser.java index 9bbff3b4d..aca7acb6c 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/table/RdbSinkParser.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/table/RdbSinkParser.java @@ -17,9 +17,8 @@ */ package com.dtstack.flink.sql.sink.rdb.table; -import com.dtstack.flink.sql.constrant.ConfigConstrant; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; @@ -31,9 +30,9 @@ * * @author maqi */ -public class RdbSinkParser extends AbsTableParser { +public class RdbSinkParser extends AbstractTableParser { @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { RdbTableInfo rdbTableInfo = new RdbTableInfo(); rdbTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, rdbTableInfo); diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/table/RdbTableInfo.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/table/RdbTableInfo.java index d5ad4eab6..cf185308c 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/table/RdbTableInfo.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/table/RdbTableInfo.java @@ -18,12 +18,10 @@ package com.dtstack.flink.sql.sink.rdb.table; import com.dtstack.flink.sql.enums.EUpdateMode; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; -import java.util.Arrays; - /** * Reason: * Date: 2018/11/27 @@ -31,7 +29,7 @@ * * @author maqi */ -public class RdbTableInfo extends TargetTableInfo { +public class RdbTableInfo extends 
AbstractTargetTableInfo { public static final String URL_KEY = "url"; diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java index 373a59f6a..a2a915bb1 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.rdb.writer; -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; +import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; import com.dtstack.flink.sql.sink.rdb.dialect.JDBCDialect; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.types.Row; @@ -59,7 +59,7 @@ public static AbstractUpsertWriter create( String[] partitionFields, boolean objectReuse, boolean allReplace, - DtRichOutputFormat metricOutputFormat) { + AbstractDtRichOutputFormat metricOutputFormat) { checkNotNull(keyFields); @@ -97,9 +97,9 @@ public static AbstractUpsertWriter create( private transient Map> keyToRows; private transient PreparedStatement deleteStatement; // only use metric - private transient DtRichOutputFormat metricOutputFormat; + private transient AbstractDtRichOutputFormat metricOutputFormat; - private AbstractUpsertWriter(int[] fieldTypes, int[] pkFields, int[] pkTypes, String deleteSql, boolean objectReuse, DtRichOutputFormat metricOutputFormat) { + private AbstractUpsertWriter(int[] fieldTypes, int[] pkFields, int[] pkTypes, String deleteSql, boolean objectReuse, AbstractDtRichOutputFormat metricOutputFormat) { this.fieldTypes = fieldTypes; this.pkFields = pkFields; this.pkTypes = pkTypes; @@ -217,7 +217,7 @@ private UpsertWriterUsingUpsertStatement( boolean objectReuse, String deleteSql, String upsertSql, - DtRichOutputFormat metricOutputFormat) { + AbstractDtRichOutputFormat metricOutputFormat) { super(fieldTypes, pkFields, pkTypes, deleteSql, objectReuse, metricOutputFormat); this.upsertSql = upsertSql; } @@ -275,7 +275,7 @@ private UpsertWriterUsingInsertUpdateStatement( String existSql, String insertSql, String updateSql, - DtRichOutputFormat metricOutputFormat) { + AbstractDtRichOutputFormat metricOutputFormat) { super(fieldTypes, pkFields, pkTypes, deleteSql, objectReuse, metricOutputFormat); this.existSql = existSql; this.insertSql = insertSql; diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java index 2aa7b01f9..341ecf983 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.rdb.writer; -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; +import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.types.Row; import org.slf4j.Logger; @@ -48,9 +48,9 @@ public class AppendOnlyWriter implements JDBCWriter { private transient PreparedStatement statement; private transient List rows; // only use metric - private transient DtRichOutputFormat metricOutputFormat; + private transient AbstractDtRichOutputFormat metricOutputFormat; - public AppendOnlyWriter(String insertSql, int[] fieldTypes, DtRichOutputFormat 
metricOutputFormat) { + public AppendOnlyWriter(String insertSql, int[] fieldTypes, AbstractDtRichOutputFormat metricOutputFormat) { this.insertSql = insertSql; this.fieldTypes = fieldTypes; this.metricOutputFormat = metricOutputFormat; diff --git a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java index a215c9d75..8f884ca03 100644 --- a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java +++ b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java @@ -42,7 +42,7 @@ /** * @author yanxi */ -public class RedisAllReqRow extends AllReqRow{ +public class RedisAllReqRow extends BaseAllReqRow { private static final long serialVersionUID = 7578879189085344807L; @@ -60,7 +60,7 @@ public class RedisAllReqRow extends AllReqRow{ private RedisSideReqRow redisSideReqRow; - public RedisAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public RedisAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new RedisAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); this.redisSideReqRow = new RedisSideReqRow(super.sideInfo); } diff --git a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllSideInfo.java b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllSideInfo.java index fc24bc4ef..ec0fa0d15 100644 --- a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllSideInfo.java +++ b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllSideInfo.java @@ -20,8 +20,8 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.util.ParseUtils; import org.apache.calcite.sql.SqlNode; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -31,16 +31,16 @@ /** * @author yanxi */ -public class RedisAllSideInfo extends SideInfo { +public class RedisAllSideInfo extends BaseSideInfo { private static final long serialVersionUID = 1998703966487857613L; - public RedisAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public RedisAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { String sideTableName = joinInfo.getSideTableName(); SqlNode conditionNode = joinInfo.getCondition(); diff --git a/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java b/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java index ee951a928..119ff5634 100644 --- a/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java +++ 
b/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java @@ -46,7 +46,7 @@ /** * @author yanxi */ -public class RedisAsyncReqRow extends AsyncReqRow { +public class RedisAsyncReqRow extends BaseAsyncReqRow { private static final long serialVersionUID = -2079908694523987738L; @@ -64,7 +64,7 @@ public class RedisAsyncReqRow extends AsyncReqRow { private RedisSideReqRow redisSideReqRow; - public RedisAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public RedisAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new RedisAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); redisSideReqRow = new RedisSideReqRow(super.sideInfo); } diff --git a/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncSideInfo.java b/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncSideInfo.java index 2736fc8a7..c23035ec7 100644 --- a/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncSideInfo.java +++ b/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncSideInfo.java @@ -20,8 +20,8 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.util.ParseUtils; import org.apache.calcite.sql.SqlNode; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -31,15 +31,15 @@ /** * @author yanxi */ -public class RedisAsyncSideInfo extends SideInfo { +public class RedisAsyncSideInfo extends BaseSideInfo { private static final long serialVersionUID = -4851348392924455039L; - public RedisAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public RedisAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { String sideTableName = joinInfo.getSideTableName(); SqlNode conditionNode = joinInfo.getCondition(); diff --git a/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideParser.java b/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideParser.java index 2d0fb77d4..514984f0a 100644 --- a/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideParser.java +++ b/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideParser.java @@ -18,8 +18,8 @@ package com.dtstack.flink.sql.side.redis.table; -import com.dtstack.flink.sql.table.AbsSideTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractSideTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; @@ -27,10 +27,10 @@ /** * @author yanxi */ -public class RedisSideParser extends AbsSideTableParser { +public 
class RedisSideParser extends AbstractSideTableParser { @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { RedisSideTableInfo redisSideTableInfo = new RedisSideTableInfo(); redisSideTableInfo.setName(tableName); diff --git a/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideReqRow.java b/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideReqRow.java index 5007868e9..17557f916 100644 --- a/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideReqRow.java +++ b/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideReqRow.java @@ -19,7 +19,7 @@ package com.dtstack.flink.sql.side.redis.table; import com.dtstack.flink.sql.side.ISideReqRow; -import com.dtstack.flink.sql.side.SideInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo; import org.apache.flink.types.Row; @@ -40,9 +40,9 @@ public class RedisSideReqRow implements ISideReqRow, Serializable { private static final long serialVersionUID = 3751171828444748982L; - private SideInfo sideInfo; + private BaseSideInfo sideInfo; - public RedisSideReqRow(SideInfo sideInfo){ + public RedisSideReqRow(BaseSideInfo sideInfo){ this.sideInfo = sideInfo; } @@ -72,7 +72,7 @@ public Row fillData(Row input, Object sideInput) { return row; } - public void setRowField(Row row, Integer index, SideInfo sideInfo, String value) { + public void setRowField(Row row, Integer index, BaseSideInfo sideInfo, String value) { Integer keyIndex = sideInfo.getSideFieldIndex().get(index); String classType = sideInfo.getSideTableInfo().getFieldClassList().get(keyIndex).getName(); switch (classType){ diff --git a/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideTableInfo.java b/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideTableInfo.java index ae6cdd017..5224134c8 100644 --- a/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideTableInfo.java +++ b/redis5/redis5-side/redis-side-core/src/main/java/com/dtstack/flink/sql/side/redis/table/RedisSideTableInfo.java @@ -18,12 +18,12 @@ package com.dtstack.flink.sql.side.redis.table; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.google.common.base.Preconditions; /** * @author yanxi */ -public class RedisSideTableInfo extends SideTableInfo { +public class RedisSideTableInfo extends AbstractSideTableInfo { private static final long serialVersionUID = -1L; diff --git a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java index bbf49dff7..a51c62417 100644 --- a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java +++ b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.redis; -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; +import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; import org.apache.commons.pool2.impl.GenericObjectPoolConfig; import 
org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; @@ -43,7 +43,7 @@ /** * @author yanxi */ -public class RedisOutputFormat extends DtRichOutputFormat { +public class RedisOutputFormat extends AbstractDtRichOutputFormat { private static final Logger LOG = LoggerFactory.getLogger(RedisOutputFormat.class); private String url; diff --git a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisSink.java b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisSink.java index 007c7fd3b..cc49a3ba8 100644 --- a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisSink.java +++ b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisSink.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.redis.table.RedisTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -69,7 +69,7 @@ public RedisSink(){ } @Override - public RedisSink genStreamSink(TargetTableInfo targetTableInfo) { + public RedisSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { RedisTableInfo redisTableInfo = (RedisTableInfo) targetTableInfo; this.url = redisTableInfo.getUrl(); this.database = redisTableInfo.getDatabase(); diff --git a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisSinkParser.java b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisSinkParser.java index 201abab2c..825d6901e 100644 --- a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisSinkParser.java +++ b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisSinkParser.java @@ -18,8 +18,8 @@ package com.dtstack.flink.sql.sink.redis.table; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; @@ -30,9 +30,9 @@ /** * @author yanxi */ -public class RedisSinkParser extends AbsTableParser { +public class RedisSinkParser extends AbstractTableParser { @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { RedisTableInfo redisTableInfo = new RedisTableInfo(); redisTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, redisTableInfo); diff --git a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisTableInfo.java b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisTableInfo.java index 5c9940252..2425f8c38 100644 --- a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisTableInfo.java +++ b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/table/RedisTableInfo.java @@ -18,12 +18,12 @@ package com.dtstack.flink.sql.sink.redis.table; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import com.google.common.base.Preconditions; /** * @author yanxi */ -public class RedisTableInfo extends 
TargetTableInfo { +public class RedisTableInfo extends AbstractTargetTableInfo { private static final String CURR_TYPE = "redis"; diff --git a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/CustomerSocketTextStreamFunction.java b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/CustomerSocketTextStreamFunction.java index 2f508b3e5..b032fdce6 100644 --- a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/CustomerSocketTextStreamFunction.java +++ b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/CustomerSocketTextStreamFunction.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.format.DeserializationMetricWrapper; import com.dtstack.flink.sql.format.dtnest.DtNestRowDeserializationSchema; import com.dtstack.flink.sql.source.serversocket.table.ServersocketSourceTableInfo; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.streaming.api.functions.source.SourceFunction; import org.apache.flink.types.Row; @@ -67,7 +67,7 @@ public class CustomerSocketTextStreamFunction implements SourceFunction { ServersocketSourceTableInfo tableInfo; public CustomerSocketTextStreamFunction(ServersocketSourceTableInfo tableInfo, TypeInformation typeInfo, - Map rowAndFieldMapping, List fieldExtraInfos) { + Map rowAndFieldMapping, List fieldExtraInfos) { this.tableInfo = tableInfo; this.deserializationSchema = new DtNestRowDeserializationSchema(typeInfo, rowAndFieldMapping, fieldExtraInfos); this.deserializationMetricWrapper = new DeserializationMetricWrapper(typeInfo, deserializationSchema); diff --git a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/ServersocketSource.java b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/ServersocketSource.java index 3ab01fb42..b4dda5f3b 100644 --- a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/ServersocketSource.java +++ b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/ServersocketSource.java @@ -19,7 +19,7 @@ import com.dtstack.flink.sql.source.IStreamSourceGener; import com.dtstack.flink.sql.source.serversocket.table.ServersocketSourceTableInfo; -import com.dtstack.flink.sql.table.SourceTableInfo; +import com.dtstack.flink.sql.table.AbstractSourceTableInfo; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -37,7 +37,7 @@ */ public class ServersocketSource implements IStreamSourceGener
{ @Override - public Table genStreamSource(SourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { + public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { ServersocketSourceTableInfo serversocketSourceTableInfo = (ServersocketSourceTableInfo) sourceTableInfo; String tableName = serversocketSourceTableInfo.getName(); diff --git a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceParser.java b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceParser.java index b52e38e59..f2d861b17 100644 --- a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceParser.java +++ b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceParser.java @@ -17,8 +17,8 @@ */ package com.dtstack.flink.sql.source.serversocket.table; -import com.dtstack.flink.sql.table.AbsSourceParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractSourceParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; @@ -30,9 +30,9 @@ * * @author maqi */ -public class ServersocketSourceParser extends AbsSourceParser { +public class ServersocketSourceParser extends AbstractSourceParser { @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { ServersocketSourceTableInfo serversocketSourceTableInfo = new ServersocketSourceTableInfo(); serversocketSourceTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, serversocketSourceTableInfo); diff --git a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceTableInfo.java b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceTableInfo.java index 02f6e9f30..3123b477f 100644 --- a/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceTableInfo.java +++ b/serversocket/serversocket-source/src/main/java/com/dtstack/flink/sql/source/serversocket/table/ServersocketSourceTableInfo.java @@ -17,7 +17,7 @@ */ package com.dtstack.flink.sql.source.serversocket.table; -import com.dtstack.flink.sql.table.SourceTableInfo; +import com.dtstack.flink.sql.table.AbstractSourceTableInfo; import com.google.common.base.Preconditions; /** @@ -27,7 +27,7 @@ * * @author maqi */ -public class ServersocketSourceTableInfo extends SourceTableInfo { +public class ServersocketSourceTableInfo extends AbstractSourceTableInfo { private static final String CURR_TYPE = "serversocket"; diff --git a/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllReqRow.java b/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllReqRow.java index af1d94171..dbae271e4 100644 --- a/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllReqRow.java +++ b/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllReqRow.java @@ -20,7 +20,7 @@ import 
com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.AbstractRdbAllReqRow; import com.dtstack.flink.sql.util.DtStringUtil; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -46,7 +46,7 @@ public class SqlserverAllReqRow extends AbstractRdbAllReqRow { private static final String SQLSERVER_DRIVER = "net.sourceforge.jtds.jdbc.Driver"; - public SqlserverAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public SqlserverAllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new SqlserverAllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllSideInfo.java b/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllSideInfo.java index 8f0fc63a4..a40af5697 100644 --- a/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllSideInfo.java +++ b/sqlserver/sqlserver-side/sqlserver-all-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAllSideInfo.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.all.RdbAllSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; import java.util.List; @@ -31,7 +31,7 @@ */ public class SqlserverAllSideInfo extends RdbAllSideInfo { - public SqlserverAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public SqlserverAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java b/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java index 070bfe334..f78061fca 100644 --- a/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java +++ b/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java @@ -44,7 +44,7 @@ public class SqlserverAsyncReqRow extends RdbAsyncReqRow { private final static String SQLSERVER_DRIVER = "net.sourceforge.jtds.jdbc.Driver"; - public SqlserverAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public SqlserverAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new SqlserverAsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncSideInfo.java b/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncSideInfo.java index d3c833c96..e8a487721 100644 --- 
a/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncSideInfo.java +++ b/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncSideInfo.java @@ -20,7 +20,7 @@ import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.rdb.async.RdbAsyncSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -34,7 +34,7 @@ */ public class SqlserverAsyncSideInfo extends RdbAsyncSideInfo { - public SqlserverAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public SqlserverAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } } diff --git a/sqlserver/sqlserver-side/sqlserver-side-core/src/main/java/com/dtstack/flink/sql/side/sqlserver/table/SqlserverSideParser.java b/sqlserver/sqlserver-side/sqlserver-side-core/src/main/java/com/dtstack/flink/sql/side/sqlserver/table/SqlserverSideParser.java index be8ebb152..e06c13898 100644 --- a/sqlserver/sqlserver-side/sqlserver-side-core/src/main/java/com/dtstack/flink/sql/side/sqlserver/table/SqlserverSideParser.java +++ b/sqlserver/sqlserver-side/sqlserver-side-core/src/main/java/com/dtstack/flink/sql/side/sqlserver/table/SqlserverSideParser.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.side.sqlserver.table; import com.dtstack.flink.sql.side.rdb.table.RdbSideParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; /** @@ -30,8 +30,8 @@ public class SqlserverSideParser extends RdbSideParser { private static final String CURR_TYPE = "sqlserver"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo sqlServerTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo sqlServerTableInfo = super.getTableInfo(tableName, fieldsInfo, props); sqlServerTableInfo.setType(CURR_TYPE); return sqlServerTableInfo; } diff --git a/sqlserver/sqlserver-sink/src/main/java/com/dtstack/flink/sql/sink/sqlserver/table/SqlserverSinkParser.java b/sqlserver/sqlserver-sink/src/main/java/com/dtstack/flink/sql/sink/sqlserver/table/SqlserverSinkParser.java index a695d6c3b..5300884bc 100644 --- a/sqlserver/sqlserver-sink/src/main/java/com/dtstack/flink/sql/sink/sqlserver/table/SqlserverSinkParser.java +++ b/sqlserver/sqlserver-sink/src/main/java/com/dtstack/flink/sql/sink/sqlserver/table/SqlserverSinkParser.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.sqlserver.table; import com.dtstack.flink.sql.sink.rdb.table.RdbSinkParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; import java.util.Map; @@ -33,8 +33,8 @@ public class SqlserverSinkParser extends RdbSinkParser { private static final String CURR_TYPE = "sqlserver"; @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { - TableInfo sqlserverTableInfo = super.getTableInfo(tableName, fieldsInfo, props); + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + AbstractTableInfo 
sqlserverTableInfo = super.getTableInfo(tableName, fieldsInfo, props); sqlserverTableInfo.setType(CURR_TYPE); return sqlserverTableInfo; } From 9ae4741974591282538afc6c52dc480f2eb5f09f Mon Sep 17 00:00:00 2001 From: maqi Date: Thu, 5 Mar 2020 14:54:33 +0800 Subject: [PATCH 18/47] es id null check --- .../sql/sink/elasticsearch/CustomerSinkFunc.java | 13 ++++++++----- .../sql/sink/elasticsearch/CustomerSinkFunc.java | 13 ++++++++----- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java index 1f3efb8d7..0d97f5995 100644 --- a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java +++ b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java @@ -96,11 +96,14 @@ public void setOutRecords(Counter outRecords) { } private IndexRequest createIndexRequest(Row element) { - // index start at 1, - String idFieldStr = idFieldIndexList.stream() - .filter(index -> index > 0 && index <= element.getArity()) - .map(index -> element.getField(index - 1).toString()) - .collect(Collectors.joining(ID_VALUE_SPLIT)); + String idFieldStr = ""; + if (null != idFieldIndexList) { + // index start at 1, + idFieldStr = idFieldIndexList.stream() + .filter(index -> index > 0 && index <= element.getArity()) + .map(index -> element.getField(index - 1).toString()) + .collect(Collectors.joining(ID_VALUE_SPLIT)); + } Map dataMap = EsUtil.rowToJsonMap(element,fieldNames,fieldTypes); int length = Math.min(element.getArity(), fieldNames.size()); diff --git a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java index b1c8a5e97..ec1732d0e 100644 --- a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java +++ b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/CustomerSinkFunc.java @@ -95,11 +95,14 @@ public void setOutDirtyRecords(Counter outDirtyRecords) { } private IndexRequest createIndexRequest(Row element) { - // index start at 1, - String idFieldStr = idFieldIndexList.stream() - .filter(index -> index > 0 && index <= element.getArity()) - .map(index -> element.getField(index - 1).toString()) - .collect(Collectors.joining(ID_VALUE_SPLIT)); + String idFieldStr = ""; + if (null != idFieldIndexList) { + // index start at 1, + idFieldStr = idFieldIndexList.stream() + .filter(index -> index > 0 && index <= element.getArity()) + .map(index -> element.getField(index - 1).toString()) + .collect(Collectors.joining(ID_VALUE_SPLIT)); + } Map dataMap = Es6Util.rowToJsonMap(element, fieldNames, fieldTypes); int length = Math.min(element.getArity(), fieldNames.size()); From 228c935e6a62f7f875805f0dad3f3881698c4021 Mon Sep 17 00:00:00 2001 From: dapeng Date: Thu, 5 Mar 2020 17:13:43 +0800 Subject: [PATCH 19/47] =?UTF-8?q?=E6=97=A5=E5=BF=97=E5=9F=BA=E6=9C=AC?= =?UTF-8?q?=E4=BD=8D=E7=BD=AE=E8=B0=83=E6=95=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- core/pom.xml | 7 ++++++- core/src/main/java/com/dtstack/flink/sql/Main.java | 1 + .../dtstack/flink/sql/constrant/ConfigConstrant.java 
| 3 ++- .../dtstack/flink/sql/exec/ExecuteProcessHelper.java | 11 ++++++++++- launcher/pom.xml | 5 ----- .../com/dtstack/flink/sql/launcher/LauncherMain.java | 8 -------- pom.xml | 1 - 7 files changed, 19 insertions(+), 17 deletions(-) diff --git a/core/pom.xml b/core/pom.xml index 677f9d5ee..eb470a3c0 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -20,6 +20,7 @@ 1.16.0 2.7.9 19.0 + 1.0.0-SNAPSHOT @@ -121,7 +122,11 @@ junit 4.12 - + + com.aiweiergou + tools-logger + ${logger.tool.version} + diff --git a/core/src/main/java/com/dtstack/flink/sql/Main.java b/core/src/main/java/com/dtstack/flink/sql/Main.java index 5d7528869..fe925f9a6 100644 --- a/core/src/main/java/com/dtstack/flink/sql/Main.java +++ b/core/src/main/java/com/dtstack/flink/sql/Main.java @@ -38,6 +38,7 @@ public class Main { public static void main(String[] args) throws Exception { ParamsInfo paramsInfo = ExecuteProcessHelper.parseParams(args); + ExecuteProcessHelper.setLogLevel(paramsInfo); StreamExecutionEnvironment env = ExecuteProcessHelper.getStreamExecution(paramsInfo); env.execute(paramsInfo.getName()); LOG.info("program {} execution success", paramsInfo.getName()); diff --git a/core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java b/core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java index 8e5f34d80..d1885a570 100644 --- a/core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java +++ b/core/src/main/java/com/dtstack/flink/sql/constrant/ConfigConstrant.java @@ -65,7 +65,7 @@ public class ConfigConstrant { public static final String RESTOREENABLE = "restore.enable"; - public static final String LOG_LEVEL_KEY = "logLevel"; + public static final String LOG_LEVEL_KEY = "logLevel"; // restart plocy @@ -75,4 +75,5 @@ public class ConfigConstrant { public static final String DELAYINTERVAL= "delay.interval"; //sec + } diff --git a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java index 5cd1248e2..648745409 100644 --- a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java +++ b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java @@ -18,8 +18,10 @@ package com.dtstack.flink.sql.exec; +import com.aiweiergou.tool.logger.api.ChangeLogLevelProcess; import com.dtstack.flink.sql.classloader.ClassLoaderManager; import com.dtstack.flink.sql.config.CalciteConfig; +import com.dtstack.flink.sql.constrant.ConfigConstrant; import com.dtstack.flink.sql.enums.ClusterMode; import com.dtstack.flink.sql.enums.ECacheType; import com.dtstack.flink.sql.enums.EPluginLoadMode; @@ -353,5 +355,12 @@ public static StreamExecutionEnvironment getStreamExeEnv(Properties confProperti return env; } - + public static void setLogLevel(ParamsInfo paramsInfo){ + String logLevel = paramsInfo.getConfProp().getProperty(ConfigConstrant.LOG_LEVEL_KEY); + if(org.apache.commons.lang3.StringUtils.isBlank(logLevel)){ + return; + } + ChangeLogLevelProcess logLevelProcess = new ChangeLogLevelProcess(); + logLevelProcess.process(logLevel); + } } \ No newline at end of file diff --git a/launcher/pom.xml b/launcher/pom.xml index 8c1ce53d5..1771df1ec 100644 --- a/launcher/pom.xml +++ b/launcher/pom.xml @@ -34,11 +34,6 @@ fastjson 1.2.7 - - com.aiweiergou - tools-logger - ${logger.tool.version} - diff --git a/launcher/src/main/java/com/dtstack/flink/sql/launcher/LauncherMain.java b/launcher/src/main/java/com/dtstack/flink/sql/launcher/LauncherMain.java index 95afc86cd..f8338d2d4 100644 --- 
a/launcher/src/main/java/com/dtstack/flink/sql/launcher/LauncherMain.java +++ b/launcher/src/main/java/com/dtstack/flink/sql/launcher/LauncherMain.java @@ -149,12 +149,4 @@ private static String[] parseJson(String[] args) { String[] array = list.toArray(new String[list.size()]); return array; } - - private static void setLogLevel(String logLevel){ - if(org.apache.commons.lang3.StringUtils.isBlank(logLevel)){ - return; - } - ChangeLogLevelProcess logLevelProcess = new ChangeLogLevelProcess(); - logLevelProcess.process(logLevel); - } } diff --git a/pom.xml b/pom.xml index b0ba9f47d..5493645e9 100644 --- a/pom.xml +++ b/pom.xml @@ -41,7 +41,6 @@ UTF-8 1.8.1 - 1.0.0-SNAPSHOT From 02e896e35a5cf597c85b4ff06314519a898dcf92 Mon Sep 17 00:00:00 2001 From: dapeng Date: Thu, 5 Mar 2020 17:21:57 +0800 Subject: [PATCH 20/47] =?UTF-8?q?=E7=A7=BB=E9=99=A4launcher=E7=9A=84?= =?UTF-8?q?=E6=97=A5=E5=BF=97=E8=AE=BE=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../main/java/com/dtstack/flink/sql/launcher/LauncherMain.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/launcher/src/main/java/com/dtstack/flink/sql/launcher/LauncherMain.java b/launcher/src/main/java/com/dtstack/flink/sql/launcher/LauncherMain.java index f8338d2d4..0b036c87c 100644 --- a/launcher/src/main/java/com/dtstack/flink/sql/launcher/LauncherMain.java +++ b/launcher/src/main/java/com/dtstack/flink/sql/launcher/LauncherMain.java @@ -84,8 +84,6 @@ public static void main(String[] args) throws Exception { confProp = URLDecoder.decode(confProp, Charsets.UTF_8.toString()); Properties confProperties = PluginUtil.jsonStrToObject(confProp, Properties.class); - setLogLevel(confProperties.getProperty(ConfigConstrant.LOG_LEVEL_KEY)); - if(mode.equals(ClusterMode.local.name())) { String[] localArgs = argList.toArray(new String[argList.size()]); Main.main(localArgs); From 2a0663e8302fe3111c310183f56e3c5e1d27717d Mon Sep 17 00:00:00 2001 From: dapeng Date: Fri, 6 Mar 2020 13:14:47 +0800 Subject: [PATCH 21/47] codeview --- .../com/dtstack/flink/sql/classloader/ClassLoaderManager.java | 4 ++-- .../java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java | 4 ++-- .../main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java | 4 ++-- .../main/java/com/dtstack/flink/sql/side/ParserJoinField.java | 2 ++ 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderManager.java b/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderManager.java index c03f60617..2e62e11ab 100644 --- a/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderManager.java +++ b/core/src/main/java/com/dtstack/flink/sql/classloader/ClassLoaderManager.java @@ -98,9 +98,9 @@ public static List getClassPath() { - public static URLClassLoader loadExtraJar(List jarURLList, URLClassLoader classLoader) + public static URLClassLoader loadExtraJar(List jarUrlList, URLClassLoader classLoader) throws IllegalAccessException, InvocationTargetException { - for(URL url : jarURLList){ + for(URL url : jarUrlList){ if(url.toString().endsWith(".jar")){ urlClassLoaderAddUrl(classLoader, url); } diff --git a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java index ed54a9c48..40729d257 100644 --- a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java +++ b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java @@ -117,7 +117,7 @@ 
public static ParamsInfo parseParams(String[] args) throws Exception { String confProp = URLDecoder.decode(options.getConfProp(), Charsets.UTF_8.toString()); Properties confProperties = PluginUtil.jsonStrToObject(confProp, Properties.class); - List jarURList = getExternalJarUrls(options.getAddjar()); + List jarUrlList = getExternalJarUrls(options.getAddjar()); return ParamsInfo.builder() .setSql(sql) @@ -127,7 +127,7 @@ public static ParamsInfo parseParams(String[] args) throws Exception { .setPluginLoadMode(pluginLoadMode) .setDeployMode(deployMode) .setConfProp(confProperties) - .setJarUrlList(jarURList) + .setJarUrlList(jarUrlList) .setLogLevel(logLevel) .build(); diff --git a/core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java b/core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java index 8a6851add..357c65571 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java @@ -26,8 +26,8 @@ import org.apache.flink.table.runtime.types.CRow; import java.sql.SQLException; -import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; /** @@ -60,7 +60,7 @@ public void open(Configuration parameters) throws Exception { //start reload cache thread AbstractSideTableInfo sideTableInfo = sideInfo.getSideTableInfo(); - es = Executors.newSingleThreadScheduledExecutor(new DTThreadFactory("cache-all-reload")); + es = new ScheduledThreadPoolExecutor(1,new DTThreadFactory("cache-all-reload")); es.scheduleAtFixedRate(() -> reloadCache(), sideTableInfo.getCacheTimeout(), sideTableInfo.getCacheTimeout(), TimeUnit.MILLISECONDS); } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/ParserJoinField.java b/core/src/main/java/com/dtstack/flink/sql/side/ParserJoinField.java index 738e0f84e..d9d79f34e 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/ParserJoinField.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/ParserJoinField.java @@ -80,6 +80,7 @@ public static List getRowTypeInfo(SqlNode sqlNode, JoinScope scope, b switch(identifierSize) { case 1: fieldInfoList.addAll(getAllField(scope)); + break; default: SqlIdentifier tableIdentify = identifier.skipLast(1); JoinScope.ScopeChild scopeChild = scope.getScope(tableIdentify.getSimple()); @@ -99,6 +100,7 @@ public static List getRowTypeInfo(SqlNode sqlNode, JoinScope scope, b fieldInfo.setTypeInformation(type); fieldInfoList.add(fieldInfo); } + break; } } } From 78b3a1a4eab7812892e15ed2f773b5002c2ede0f Mon Sep 17 00:00:00 2001 From: dapeng Date: Fri, 6 Mar 2020 17:05:29 +0800 Subject: [PATCH 22/47] =?UTF-8?q?=E6=97=B6=E9=97=B4=E6=A0=BC=E5=BC=8F?= =?UTF-8?q?=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../com/dtstack/flink/sql/util/DateUtil.java | 43 ++++++++++++------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java index c942fb064..462eed30b 100644 --- a/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java +++ b/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java @@ -23,6 +23,12 @@ import java.sql.Timestamp; import java.text.ParseException; import java.text.SimpleDateFormat; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneId; 
+import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; import java.util.Calendar; import java.util.Date; import java.util.Locale; @@ -39,12 +45,9 @@ */ public class DateUtil { - static final String datetimeFormat = "yyyy-MM-dd HH:mm:ss"; - static final String dateFormat = "yyyy-MM-dd"; - static final String timeFormat = "HH:mm:ss"; - static final SimpleDateFormat datetimeFormatter = new SimpleDateFormat(datetimeFormat); - static final SimpleDateFormat dateFormatter = new SimpleDateFormat(dateFormat); - static final SimpleDateFormat timeFormatter = new SimpleDateFormat(timeFormat); + static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"); + static final DateTimeFormatter DATE_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd"); + static final DateTimeFormatter TIME_FORMATTER = DateTimeFormatter.ofPattern("HH:mm:ss"); public static java.sql.Date columnToDate(Object column) { if(column instanceof String) { @@ -69,23 +72,32 @@ public static Date stringToDate(String strDate) { return null; } try { - return datetimeFormatter.parse(strDate); - } catch (ParseException ignored) { + ; + return localDateTimetoDate(LocalDateTime.parse(strDate, DATE_TIME_FORMATTER)); + } catch (DateTimeParseException ignored) { } try { - return dateFormatter.parse(strDate); - } catch (ParseException ignored) { + return localDateTimetoDate(LocalDate.parse(strDate, DATE_FORMATTER).atStartOfDay()); + } catch (DateTimeParseException ignored) { } try { - return timeFormatter.parse(strDate); - } catch (ParseException ignored) { + return localDateTimetoDate(LocalDateTime.of(LocalDate.now(), LocalTime.parse(strDate, TIME_FORMATTER))); + } catch (DateTimeParseException ignored) { } throw new RuntimeException("can't parse date"); } + public static Date localDateTimetoDate(LocalDateTime localDateTime){ + return Date.from(localDateTime.atZone(ZoneId.systemDefault()).toInstant()); + } + + public static LocalDateTime dateToLocalDateTime(Date date){ + return date.toInstant().atZone(ZoneId.systemDefault()).toLocalDateTime(); + } + /** * * @@ -762,11 +774,12 @@ public static java.sql.Timestamp columnToTimestamp(Object column) { } public static String dateToString(Date date) { - return dateFormatter.format(date); + LocalDateTime localDateTime = dateToLocalDateTime(date); + return localDateTime.format(DATE_FORMATTER); } public static String timestampToString(Date date) { - return datetimeFormatter.format(date); + LocalDateTime localDateTime = dateToLocalDateTime(date); + return localDateTime.format(DATE_TIME_FORMATTER); } - } From 02f2c37407225332bfcadb05f279e7042c3be5f9 Mon Sep 17 00:00:00 2001 From: dapeng Date: Fri, 6 Mar 2020 17:12:50 +0800 Subject: [PATCH 23/47] =?UTF-8?q?=E6=8F=90=E4=BA=A4=E9=81=97=E6=BC=8F?= =?UTF-8?q?=E7=9A=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../flink/sql/sink/console/table/TablePrintUtil.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java index c0d35404d..6ddc60386 100644 --- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java +++ b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java @@ -22,6 +22,7 @@ public class TablePrintUtil { public static final int 
ALIGN_LEFT = 1;//左对齐 public static final int ALIGN_RIGHT = 2;//右对齐 public static final int ALIGN_CENTER = 3;//居中对齐 + private static final Pattern PATTERN = Pattern.compile("[\u4e00-\u9fa5]"); private int align = ALIGN_CENTER;//默认居中对齐 private boolean equilong = false;//默认不等宽 @@ -138,8 +139,8 @@ private static ListgetColList(Object obj) { * @return */ private int getStringCharLength(String str) { - Pattern p = Pattern.compile("[\u4e00-\u9fa5]");//利用正则找到中文 - Matcher m = p.matcher(str); + //利用正则找到中文 + Matcher m = PATTERN.matcher(str); int count = 0; while (m.find()) { count++; @@ -219,6 +220,8 @@ public String getTableString() { sb.append(cell); for (int i = 0; i < right + padding; i++) {sb.append(s);} break; + default: + break; } sb.append(v); } From 4c7f9867045c5e3f94906e891ca354b6753561fb Mon Sep 17 00:00:00 2001 From: zoudaokoulife Date: Tue, 10 Mar 2020 15:45:22 +0800 Subject: [PATCH 24/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E5=90=88=E5=B9=B6?= =?UTF-8?q?=E5=86=B2=E7=AA=81=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../flink/sql/exec/ExecuteProcessHelper.java | 2 +- .../dtstack/flink/sql/side/SideSqlExec.java | 4 ++-- .../sql/table/AbstractSideTableParser.java | 4 ++-- .../sink/elasticsearch/ElasticsearchSink.java | 2 +- .../Elasticsearch6AllReqRow.java | 21 ++++++++----------- .../Elasticsearch6AllSideInfo.java | 11 +++++----- .../Elasticsearch6AsyncReqRow.java | 4 ++-- .../Elasticsearch6AsyncSideInfo.java | 13 ++++++------ .../table/Elasticsearch6SideParser.java | 8 +++---- .../table/Elasticsearch6SideTableInfo.java | 4 ++-- .../sql/side/elasticsearch6/util/Es6Util.java | 10 ++++----- .../sink/elasticsearch/ElasticsearchSink.java | 4 ++-- .../table/ElasticsearchSinkParser.java | 8 +++---- .../table/ElasticsearchTableInfo.java | 4 ++-- .../sql/sink/hbase/HbaseOutputFormat.java | 7 +------ .../flink/sql/sink/kafka/KafkaSink.java | 1 + .../flink/sql/sink/kafka/KafkaSink.java | 1 + .../flink/sql/sink/kafka/KafkaSink.java | 1 + .../flink/sql/sink/kafka/KafkaSink.java | 1 + .../flink/sql/side/mongo/MongoAllReqRow.java | 5 +++-- .../side/rdb/all/AbstractRdbAllReqRow.java | 3 +-- .../sink/rdb/writer/AbstractUpsertWriter.java | 10 ++++----- .../sql/sink/rdb/writer/AppendOnlyWriter.java | 2 +- .../sql/side/redis/RedisAsyncReqRow.java | 4 ++-- 24 files changed, 65 insertions(+), 69 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java index 85568f19e..c02c3b142 100644 --- a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java +++ b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java @@ -127,7 +127,7 @@ public static ParamsInfo parseParams(String[] args) throws Exception { .setPluginLoadMode(pluginLoadMode) .setDeployMode(deployMode) .setConfProp(confProperties) - .setJarUrlList(jarURList) + .setJarUrlList(jarUrlList) .build(); } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java b/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java index a92685f3c..e828bec03 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java @@ -99,7 +99,7 @@ public class SideSqlExec { private Map localTableCache = Maps.newHashMap(); - public void exec(String sql, Map sideTableMap, StreamTableEnvironment tableEnv, + public void exec(String sql, Map sideTableMap, StreamTableEnvironment 
tableEnv, Map tableCache, StreamQueryConfig queryConfig, CreateTmpTableParser.SqlParserResult createView) throws Exception { if(localSqlPluginPath == null){ throw new RuntimeException("need to set localSqlPluginPath"); @@ -732,7 +732,7 @@ protected void dealAsSourceTable(StreamTableEnvironment tableEnv, Map tableCache, List replaceInfoList) throws SqlParseException { - AliasInfo aliasInfo = parseASNode(pollSqlNode); + AliasInfo aliasInfo = parseAsNode(pollSqlNode); if (localTableCache.containsKey(aliasInfo.getName())) { return; } diff --git a/core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java index a168eebbc..39b72c22f 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java @@ -108,8 +108,8 @@ protected void parseCacheProp(AbstractSideTableInfo sideTableInfo, Map 0){ sideTableInfo.setAsyncTimeoutNumLimit(asyncTimeoutNum); } diff --git a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java index 4229abb99..5ca81c5ed 100644 --- a/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java +++ b/elasticsearch5/elasticsearch5-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java @@ -20,6 +20,7 @@ package com.dtstack.flink.sql.sink.elasticsearch; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -33,7 +34,6 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.elasticsearch.table.ElasticsearchTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/elasticsearch6/elasticsearch6-side/elasticsearch6-all-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AllReqRow.java b/elasticsearch6/elasticsearch6-side/elasticsearch6-all-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AllReqRow.java index 648f09fba..b8cf2c46a 100644 --- a/elasticsearch6/elasticsearch6-side/elasticsearch6-all-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AllReqRow.java +++ b/elasticsearch6/elasticsearch6-side/elasticsearch6-all-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AllReqRow.java @@ -18,24 +18,22 @@ package com.dtstack.flink.sql.side.elasticsearch6; -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.table.runtime.types.CRow; -import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo; -import org.apache.flink.types.Row; -import org.apache.flink.util.Collector; - -import com.dtstack.flink.sql.side.AllReqRow; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; +import com.dtstack.flink.sql.side.BaseAllReqRow; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; import com.dtstack.flink.sql.side.elasticsearch6.table.Elasticsearch6SideTableInfo; import 
com.dtstack.flink.sql.side.elasticsearch6.util.Es6Util; import com.dtstack.flink.sql.side.elasticsearch6.util.SwitchUtil; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.apache.calcite.sql.JoinType; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; +import org.apache.flink.api.java.typeutils.RowTypeInfo; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo; +import org.apache.flink.types.Row; +import org.apache.flink.util.Collector; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.RequestOptions; @@ -49,7 +47,6 @@ import java.io.IOException; import java.io.Serializable; -import java.sql.SQLException; import java.sql.Timestamp; import java.util.Calendar; import java.util.List; @@ -60,7 +57,7 @@ * @author yinxi * @date 2020/1/13 - 1:00 */ -public class Elasticsearch6AllReqRow extends AllReqRow implements Serializable { +public class Elasticsearch6AllReqRow extends BaseAllReqRow implements Serializable { private static final Logger LOG = LoggerFactory.getLogger(Elasticsearch6AllReqRow.class); @@ -70,7 +67,7 @@ public class Elasticsearch6AllReqRow extends AllReqRow implements Serializable { private SearchRequest searchRequest; private BoolQueryBuilder boolQueryBuilder; - public Elasticsearch6AllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public Elasticsearch6AllReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new Elasticsearch6AllSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); } diff --git a/elasticsearch6/elasticsearch6-side/elasticsearch6-all-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AllSideInfo.java b/elasticsearch6/elasticsearch6-side/elasticsearch6-all-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AllSideInfo.java index bf21b3ca8..d353a583d 100644 --- a/elasticsearch6/elasticsearch6-side/elasticsearch6-all-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AllSideInfo.java +++ b/elasticsearch6/elasticsearch6-side/elasticsearch6-all-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AllSideInfo.java @@ -18,12 +18,13 @@ package com.dtstack.flink.sql.side.elasticsearch6; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; + import com.dtstack.flink.sql.util.ParseUtils; import com.google.common.collect.Lists; import org.apache.calcite.sql.SqlNode; @@ -35,15 +36,15 @@ * @author yinxi * @date 2020/1/13 - 1:01 */ -public class Elasticsearch6AllSideInfo extends SideInfo { +public class Elasticsearch6AllSideInfo extends BaseSideInfo { - public Elasticsearch6AllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public Elasticsearch6AllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void 
buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { } diff --git a/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncReqRow.java b/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncReqRow.java index f67177fe4..e8164edb2 100644 --- a/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncReqRow.java +++ b/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncReqRow.java @@ -57,14 +57,14 @@ * @author yinxi * @date 2020/2/13 - 13:10 */ -public class Elasticsearch6AsyncReqRow extends AsyncReqRow implements Serializable { +public class Elasticsearch6AsyncReqRow extends BaseAsyncReqRow implements Serializable { private static final Logger LOG = LoggerFactory.getLogger(Elasticsearch6AsyncReqRow.class); private transient RestHighLevelClient rhlClient; private SearchRequest searchRequest; private List sqlJoinCompareOperate = Lists.newArrayList(); - public Elasticsearch6AsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public Elasticsearch6AsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(new Elasticsearch6AsyncSideInfo(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo)); SqlNode conditionNode = joinInfo.getCondition(); ParseUtils.parseJoinCompareOperate(conditionNode, sqlJoinCompareOperate); diff --git a/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncSideInfo.java b/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncSideInfo.java index 7b3a2f9e7..4118988ab 100644 --- a/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncSideInfo.java +++ b/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncSideInfo.java @@ -18,18 +18,17 @@ package com.dtstack.flink.sql.side.elasticsearch6; -import org.apache.flink.api.java.typeutils.RowTypeInfo; - +import com.dtstack.flink.sql.side.AbstractSideTableInfo; +import com.dtstack.flink.sql.side.BaseSideInfo; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideInfo; -import com.dtstack.flink.sql.side.SideTableInfo; import com.dtstack.flink.sql.util.ParseUtils; import com.google.common.collect.Lists; import org.apache.calcite.sql.SqlBasicCall; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlNode; +import org.apache.flink.api.java.typeutils.RowTypeInfo; import java.util.List; @@ -37,15 +36,15 @@ * @author yinxi * @date 2020/2/13 - 13:09 */ -public class Elasticsearch6AsyncSideInfo extends SideInfo { +public class Elasticsearch6AsyncSideInfo extends BaseSideInfo { - public Elasticsearch6AsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, SideTableInfo sideTableInfo) { + public 
Elasticsearch6AsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); } @Override - public void buildEqualInfo(JoinInfo joinInfo, SideTableInfo sideTableInfo) { + public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInfo) { String sideTableName = joinInfo.getSideTableName(); SqlNode conditionNode = joinInfo.getCondition(); diff --git a/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/table/Elasticsearch6SideParser.java b/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/table/Elasticsearch6SideParser.java index 13dfe2995..1b39bbf0f 100644 --- a/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/table/Elasticsearch6SideParser.java +++ b/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/table/Elasticsearch6SideParser.java @@ -19,8 +19,8 @@ package com.dtstack.flink.sql.side.elasticsearch6.table; import com.dtstack.flink.sql.side.elasticsearch6.util.ClassUtil; -import com.dtstack.flink.sql.table.AbsSideTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractSideTableParser; +import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import org.apache.commons.lang3.StringUtils; @@ -30,7 +30,7 @@ * @author yinxi * @date 2020/1/13 - 1:07 */ -public class Elasticsearch6SideParser extends AbsSideTableParser { +public class Elasticsearch6SideParser extends AbstractSideTableParser { private static final String KEY_ES6_ADDRESS = "address"; @@ -55,7 +55,7 @@ protected boolean fieldNameNeedsUpperCase() { } @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { Elasticsearch6SideTableInfo elasticsearch6SideTableInfo = new Elasticsearch6SideTableInfo(); elasticsearch6SideTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, elasticsearch6SideTableInfo); diff --git a/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/table/Elasticsearch6SideTableInfo.java b/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/table/Elasticsearch6SideTableInfo.java index 0afe2d59e..a01c90a53 100644 --- a/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/table/Elasticsearch6SideTableInfo.java +++ b/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/table/Elasticsearch6SideTableInfo.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.side.elasticsearch6.table; -import com.dtstack.flink.sql.side.SideTableInfo; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.google.common.base.Preconditions; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -26,7 +26,7 @@ * @author yinxi * @date 2020/1/13 - 15:00 */ -public class Elasticsearch6SideTableInfo extends SideTableInfo { +public class Elasticsearch6SideTableInfo extends AbstractSideTableInfo { private static final String CURR_TYPE = "elasticsearch6"; diff --git 
a/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/util/Es6Util.java b/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/util/Es6Util.java index cc591789e..3d9e4d62a 100644 --- a/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/util/Es6Util.java +++ b/elasticsearch6/elasticsearch6-side/elasticsearch6-side-core/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/util/Es6Util.java @@ -18,8 +18,8 @@ package com.dtstack.flink.sql.side.elasticsearch6.util; +import com.dtstack.flink.sql.side.BaseSideInfo; import com.dtstack.flink.sql.side.PredicateInfo; -import com.dtstack.flink.sql.side.SideInfo; import com.dtstack.flink.sql.side.elasticsearch6.table.Elasticsearch6SideTableInfo; import org.apache.commons.lang3.StringUtils; import org.apache.http.HttpHost; @@ -98,7 +98,7 @@ public static RestHighLevelClient getClient(String esAddress, Boolean isAuthMesh } // add index and type to search request - public static SearchRequest setSearchRequest(SideInfo sideInfo) { + public static SearchRequest setSearchRequest(BaseSideInfo sideInfo) { SearchRequest searchRequest = new SearchRequest(); Elasticsearch6SideTableInfo tableInfo = (Elasticsearch6SideTableInfo) sideInfo.getSideTableInfo(); // determine existence of index @@ -129,7 +129,7 @@ public static SearchRequest setSearchRequest(SideInfo sideInfo) { } // build where cause - public static BoolQueryBuilder setPredicateclause(SideInfo sideInfo) { + public static BoolQueryBuilder setPredicateclause(BaseSideInfo sideInfo) { BoolQueryBuilder boolQueryBuilder = null; List predicateInfoes = sideInfo.getSideTableInfo().getPredicateInfoes(); @@ -144,7 +144,7 @@ public static BoolQueryBuilder setPredicateclause(SideInfo sideInfo) { } // build filter condition - public static BoolQueryBuilder buildFilterCondition(BoolQueryBuilder boolQueryBuilder, PredicateInfo info, SideInfo sideInfo) { + public static BoolQueryBuilder buildFilterCondition(BoolQueryBuilder boolQueryBuilder, PredicateInfo info, BaseSideInfo sideInfo) { switch (info.getOperatorKind()) { case "IN": return boolQueryBuilder.must(QueryBuilders.termsQuery(textConvertToKeyword(info.getFieldName(), sideInfo), removeSpaceAndApostrophe(info.getCondition()))); @@ -202,7 +202,7 @@ public static String[] removeSpaceAndApostrophe(String str) { } // prevent word segmentation - public static String textConvertToKeyword(String fieldName, SideInfo sideInfo) { + public static String textConvertToKeyword(String fieldName, BaseSideInfo sideInfo) { String[] sideFieldTypes = sideInfo.getSideTableInfo().getFieldTypes(); int fieldIndex = sideInfo.getSideTableInfo().getFieldList().indexOf(fieldName.trim()); String fieldType = sideFieldTypes[fieldIndex]; diff --git a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java index 2646c50e9..b7d9de6fc 100644 --- a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java +++ b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/ElasticsearchSink.java @@ -18,6 +18,7 @@ package com.dtstack.flink.sql.sink.elasticsearch; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import 
org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; @@ -31,7 +32,6 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.elasticsearch.table.ElasticsearchTableInfo; -import com.dtstack.flink.sql.table.TargetTableInfo; import com.google.common.collect.Maps; import org.apache.commons.lang.StringUtils; import org.apache.http.HttpHost; @@ -128,7 +128,7 @@ public void emitDataStream(DataStream> dataStream) { } @Override - public ElasticsearchSink genStreamSink(TargetTableInfo targetTableInfo) { + public ElasticsearchSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { esTableInfo = (ElasticsearchTableInfo) targetTableInfo; clusterName = esTableInfo.getClusterName(); index = esTableInfo.getIndex(); diff --git a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchSinkParser.java b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchSinkParser.java index 8902b953c..22c2b72bc 100644 --- a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchSinkParser.java +++ b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchSinkParser.java @@ -18,8 +18,8 @@ package com.dtstack.flink.sql.sink.elasticsearch.table; -import com.dtstack.flink.sql.table.AbsTableParser; -import com.dtstack.flink.sql.table.TableInfo; +import com.dtstack.flink.sql.table.AbstractTableInfo; +import com.dtstack.flink.sql.table.AbstractTableParser; import com.dtstack.flink.sql.util.MathUtil; import org.apache.commons.lang3.StringUtils; @@ -29,7 +29,7 @@ * @author yinxi * @date 2020/1/9 - 15:06 */ -public class ElasticsearchSinkParser extends AbsTableParser { +public class ElasticsearchSinkParser extends AbstractTableParser { private static final String KEY_ES6_ADDRESS = "address"; @@ -55,7 +55,7 @@ protected boolean fieldNameNeedsUpperCase() { } @Override - public TableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { + public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) { ElasticsearchTableInfo elasticsearchTableInfo = new ElasticsearchTableInfo(); elasticsearchTableInfo.setName(tableName); parseFieldsInfo(fieldsInfo, elasticsearchTableInfo); diff --git a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java index 3cc3dd9ff..d0eef18f0 100644 --- a/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java +++ b/elasticsearch6/elasticsearch6-sink/src/main/java/com/dtstack/flink/sql/sink/elasticsearch/table/ElasticsearchTableInfo.java @@ -18,7 +18,7 @@ package com.dtstack.flink.sql.sink.elasticsearch.table; -import com.dtstack.flink.sql.table.TargetTableInfo; +import com.dtstack.flink.sql.table.AbstractTargetTableInfo; import com.google.common.base.Preconditions; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang3.math.NumberUtils; @@ -29,7 +29,7 @@ * @author yinxi * @date 2020/1/9 - 15:06 */ -public class ElasticsearchTableInfo extends TargetTableInfo { +public class ElasticsearchTableInfo extends AbstractTargetTableInfo { private static 
final String CURR_TYPE = "elasticsearch6"; diff --git a/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/HbaseOutputFormat.java b/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/HbaseOutputFormat.java index 7709e68d9..382e8f975 100644 --- a/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/HbaseOutputFormat.java +++ b/hbase/hbase-sink/src/main/java/com/dtstack/flink/sql/sink/hbase/HbaseOutputFormat.java @@ -20,19 +20,14 @@ package com.dtstack.flink.sql.sink.hbase; -import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; import com.dtstack.flink.sql.enums.EUpdateMode; +import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; import com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.configuration.Configuration; import org.apache.flink.types.Row; import org.apache.flink.util.Preconditions; - -import com.dtstack.flink.sql.enums.EUpdateMode; -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; -import com.google.common.collect.Lists; -import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index dbd231dfe..7105bc037 100644 --- a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -21,6 +21,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; import com.dtstack.flink.sql.table.AbstractTargetTableInfo; +import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; diff --git a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index 7dad2896f..0a991a8ea 100644 --- a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -21,6 +21,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; import com.dtstack.flink.sql.table.AbstractTargetTableInfo; +import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; diff --git a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index 5144f4486..34ea8fc5f 100644 --- a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -21,6 +21,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; import com.dtstack.flink.sql.table.AbstractTargetTableInfo; +import org.apache.commons.lang3.StringUtils; import 
org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; diff --git a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index 3c5d638cc..d7807a935 100644 --- a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -21,6 +21,7 @@ import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; import com.dtstack.flink.sql.table.AbstractTargetTableInfo; +import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.RowTypeInfo; diff --git a/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java b/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java index 93b5a379f..3373f3667 100644 --- a/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java +++ b/mongo/mongo-side/mongo-all-side/src/main/java/com/dtstack/flink/sql/side/mongo/MongoAllReqRow.java @@ -35,6 +35,7 @@ import com.mongodb.client.MongoDatabase; import org.apache.calcite.sql.JoinType; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.java.typeutils.RowTypeInfo; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -215,7 +216,7 @@ private void loadData(Map>> tmpCache) throws SQ String[] sideFieldNames = StringUtils.split(sideInfo.getSideSelectFields(), ","); BasicDBObject basicDBObject = new BasicDBObject(); for (String selectField : sideFieldNames) { - basicDbObject.append(selectField, 1); + basicDBObject.append(selectField, 1); } BasicDBObject filterObject = new BasicDBObject(); try { @@ -232,7 +233,7 @@ private void loadData(Map>> tmpCache) throws SQ } - FindIterable findIterable = dbCollection.find(filterObject).projection(basicDbObject).limit(FETCH_SIZE); + FindIterable findIterable = dbCollection.find(filterObject).projection(basicDBObject).limit(FETCH_SIZE); MongoCursor mongoCursor = findIterable.iterator(); while (mongoCursor.hasNext()) { Document doc = mongoCursor.next(); diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java index 339a4a3e8..17daecfb0 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java @@ -24,8 +24,7 @@ import org.apache.flink.types.Row; import org.apache.flink.util.Collector; -import com.dtstack.flink.sql.side.AllReqRow; -import com.dtstack.flink.sql.side.SideInfo; + import com.dtstack.flink.sql.side.BaseAllReqRow; import com.dtstack.flink.sql.side.BaseSideInfo; import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java index 481cd27d6..9ca0fd754 100644 --- 
a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java @@ -116,7 +116,7 @@ public void open(Connection connection) throws SQLException { @Override public void prepareStatement(Connection connection) throws SQLException { - this.deleteStatement = connection.prepareStatement(deleteSQL); + this.deleteStatement = connection.prepareStatement(deleteSql); } @Override @@ -235,7 +235,7 @@ public void open(Connection connection) throws SQLException { @Override public void prepareStatement(Connection connection) throws SQLException { super.prepareStatement(connection); - upsertStatement = connection.prepareStatement(upsertSQL); + upsertStatement = connection.prepareStatement(upsertSql); } @Override @@ -300,9 +300,9 @@ public void open(Connection connection) throws SQLException { @Override public void prepareStatement(Connection connection) throws SQLException { super.prepareStatement(connection); - existStatement = connection.prepareStatement(existSQL); - insertStatement = connection.prepareStatement(insertSQL); - updateStatement = connection.prepareStatement(updateSQL); + existStatement = connection.prepareStatement(existSql); + insertStatement = connection.prepareStatement(insertSql); + updateStatement = connection.prepareStatement(updateSql); } @Override diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java index 5c9fcf8ac..3559d4376 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AppendOnlyWriter.java @@ -64,7 +64,7 @@ public void open(Connection connection) throws SQLException { @Override public void prepareStatement(Connection connection) throws SQLException { - this.statement = connection.prepareStatement(insertSQL); + this.statement = connection.prepareStatement(insertSql); } /** diff --git a/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java b/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java index ba6dc51ab..d2a2aed56 100644 --- a/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java +++ b/redis5/redis5-side/redis-async-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAsyncReqRow.java @@ -18,6 +18,8 @@ package com.dtstack.flink.sql.side.redis; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; +import com.dtstack.flink.sql.side.BaseAsyncReqRow; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.functions.async.ResultFuture; @@ -25,11 +27,9 @@ import org.apache.flink.types.Row; import com.dtstack.flink.sql.enums.ECacheContentType; -import com.dtstack.flink.sql.side.AsyncReqRow; import com.dtstack.flink.sql.side.CacheMissVal; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; import com.dtstack.flink.sql.side.cache.CacheObj; import com.dtstack.flink.sql.side.redis.table.RedisSideReqRow; import com.dtstack.flink.sql.side.redis.table.RedisSideTableInfo; From b78a00511222379d99f2b7cb19f3ac63dbfd2779 Mon Sep 17 00:00:00 2001 From: zoudaokoulife Date: Tue, 10 
Mar 2020 16:09:31 +0800 Subject: [PATCH 25/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E5=90=88=E5=B9=B6?= =?UTF-8?q?=E5=86=B2=E7=AA=81=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../sink/cassandra/CassandraOutputFormat.java | 2 +- .../flink/sql/side/kudu/KuduAllReqRow.java | 6 ++++++ .../flink/sql/side/kudu/KuduAsyncReqRow.java | 19 +++++++++---------- .../flink/sql/side/redis/RedisAllReqRow.java | 13 ++++++------- .../sql/sink/redis/RedisOutputFormat.java | 5 +---- 5 files changed, 23 insertions(+), 22 deletions(-) diff --git a/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/CassandraOutputFormat.java b/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/CassandraOutputFormat.java index 9cb180f8e..db9b87d8d 100644 --- a/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/CassandraOutputFormat.java +++ b/cassandra/cassandra-sink/src/main/java/com/dtstack/flink/sql/sink/cassandra/CassandraOutputFormat.java @@ -54,7 +54,7 @@ import com.datastax.driver.core.SocketOptions; import com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy; import com.datastax.driver.core.policies.RetryPolicy; -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; +import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java b/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java index 505b21479..31f9aa13d 100644 --- a/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java +++ b/kudu/kudu-side/kudu-all-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAllReqRow.java @@ -7,11 +7,17 @@ import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.side.kudu.table.KuduSideTableInfo; import com.dtstack.flink.sql.side.kudu.utils.KuduUtil; +import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.calcite.sql.JoinType; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; +import org.apache.flink.api.java.typeutils.RowTypeInfo; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo; +import org.apache.flink.types.Row; +import org.apache.flink.util.Collector; import org.apache.kudu.ColumnSchema; import org.apache.kudu.Schema; import org.apache.kudu.client.KuduClient; diff --git a/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java b/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java index 5d3d10a79..e534a85f9 100644 --- a/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java +++ b/kudu/kudu-side/kudu-async-side/src/main/java/com/dtstack/flink/sql/side/kudu/KuduAsyncReqRow.java @@ -1,20 +1,12 @@ package com.dtstack.flink.sql.side.kudu; -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.configuration.Configuration; -import org.apache.flink.streaming.api.functions.async.ResultFuture; -import org.apache.flink.table.runtime.types.CRow; -import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo; 
-import org.apache.flink.types.Row; -import org.apache.flink.util.Preconditions; - import com.dtstack.flink.sql.enums.ECacheContentType; -import com.dtstack.flink.sql.side.AsyncReqRow; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; +import com.dtstack.flink.sql.side.BaseAsyncReqRow; import com.dtstack.flink.sql.side.CacheMissVal; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; import com.dtstack.flink.sql.side.PredicateInfo; -import com.dtstack.flink.sql.side.SideTableInfo; import com.dtstack.flink.sql.side.cache.CacheObj; import com.dtstack.flink.sql.side.kudu.table.KuduSideTableInfo; import com.dtstack.flink.sql.side.kudu.utils.KuduUtil; @@ -24,6 +16,13 @@ import com.stumbleupon.async.Deferred; import io.vertx.core.json.JsonArray; import org.apache.commons.lang3.StringUtils; +import org.apache.flink.api.java.typeutils.RowTypeInfo; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.streaming.api.functions.async.ResultFuture; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo; +import org.apache.flink.types.Row; +import org.apache.flink.util.Preconditions; import org.apache.kudu.ColumnSchema; import org.apache.kudu.Schema; import org.apache.kudu.client.AsyncKuduClient; diff --git a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java index 7bc4fe60e..02f480105 100644 --- a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java +++ b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java @@ -18,15 +18,10 @@ package com.dtstack.flink.sql.side.redis; -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.table.runtime.types.CRow; -import org.apache.flink.types.Row; -import org.apache.flink.util.Collector; - -import com.dtstack.flink.sql.side.AllReqRow; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; +import com.dtstack.flink.sql.side.BaseAllReqRow; import com.dtstack.flink.sql.side.FieldInfo; import com.dtstack.flink.sql.side.JoinInfo; -import com.dtstack.flink.sql.side.SideTableInfo; import com.dtstack.flink.sql.side.redis.table.RedisSideReqRow; import com.dtstack.flink.sql.side.redis.table.RedisSideTableInfo; import com.esotericsoftware.minlog.Log; @@ -34,6 +29,10 @@ import org.apache.calcite.sql.JoinType; import org.apache.commons.lang3.StringUtils; import org.apache.commons.pool2.impl.GenericObjectPoolConfig; +import org.apache.flink.api.java.typeutils.RowTypeInfo; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.types.Row; +import org.apache.flink.util.Collector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import redis.clients.jedis.HostAndPort; diff --git a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java index 8e807d0a9..ae4fe5a4b 100644 --- a/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java +++ b/redis5/redis5-sink/src/main/java/com/dtstack/flink/sql/sink/redis/RedisOutputFormat.java @@ -19,15 +19,12 @@ package com.dtstack.flink.sql.sink.redis; import com.dtstack.flink.sql.outputformat.AbstractDtRichOutputFormat; +import org.apache.commons.lang3.StringUtils; 
import org.apache.commons.pool2.impl.GenericObjectPoolConfig; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.configuration.Configuration; import org.apache.flink.types.Row; - -import com.dtstack.flink.sql.outputformat.DtRichOutputFormat; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.pool2.impl.GenericObjectPoolConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import redis.clients.jedis.HostAndPort; From d56226622c15ca0c3ece5b8103c6012dd0a0dde9 Mon Sep 17 00:00:00 2001 From: maqi Date: Thu, 12 Mar 2020 10:41:46 +0800 Subject: [PATCH 26/47] fix conflict --- .../flink/sql/environment/StreamEnvConfigManager.java | 6 ++++++ hbase/pom.xml | 8 +------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java b/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java index 2522437c5..d7b772c6e 100644 --- a/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java +++ b/core/src/main/java/com/dtstack/flink/sql/environment/StreamEnvConfigManager.java @@ -18,6 +18,12 @@ package com.dtstack.flink.sql.environment; +import com.dtstack.flink.sql.constrant.ConfigConstrant; +import com.dtstack.flink.sql.enums.EStateBackend; +import com.dtstack.flink.sql.util.MathUtil; +import com.dtstack.flink.sql.util.PropertiesUtils; +import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.ExecutionConfig; import org.apache.flink.api.common.restartstrategy.RestartStrategies; import org.apache.flink.api.common.time.Time; diff --git a/hbase/pom.xml b/hbase/pom.xml index f64ffab1b..17f767215 100644 --- a/hbase/pom.xml +++ b/hbase/pom.xml @@ -29,13 +29,7 @@ 1.0-SNAPSHOT provided - - jdk.tools - jdk.tools - 1.6 - system - ${JAVA_HOME}/lib/tools.jar - + org.apache.hbase From ba2d191ccbb2444a824205ce413457dcc8aacccd Mon Sep 17 00:00:00 2001 From: maqi Date: Thu, 12 Mar 2020 18:42:35 +0800 Subject: [PATCH 27/47] rdb connect pool size --- .../clickhouse/ClickhouseAsyncReqRow.java | 6 ++--- .../flink/sql/side/AbstractSideTableInfo.java | 14 ++++++++++ .../flink/sql/side/BaseAsyncReqRow.java | 2 -- .../sql/table/AbstractSideTableParser.java | 9 +++++++ .../flink/sql/side/db2/Db2AsyncReqRow.java | 4 +-- hbase/pom.xml | 7 ----- .../sql/side/impala/ImpalaAsyncReqRow.java | 25 ++++++++--------- .../sql/side/mysql/MysqlAsyncReqRow.java | 9 ++----- .../sql/side/oracle/OracleAsyncReqRow.java | 9 ++----- .../sql/side/polardb/PolardbAsyncReqRow.java | 4 +-- .../postgresql/PostgresqlAsyncReqRow.java | 4 +-- .../side/rdb/all/AbstractRdbAllReqRow.java | 13 +++++++++ .../sql/side/rdb/async/RdbAsyncReqRow.java | 27 ++++++++++++++++++- .../side/sqlserver/SqlserverAsyncReqRow.java | 4 +-- 14 files changed, 88 insertions(+), 49 deletions(-) diff --git a/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java b/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java index e6f008d63..ec9a77c6b 100644 --- a/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java +++ b/clickhouse/clickhouse-side/clickhouse-async-side/src/main/java/com/dtstack/flink/sql/side/clickhouse/ClickhouseAsyncReqRow.java @@ -48,14 +48,14 @@ 
public void open(Configuration parameters) throws Exception { RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); clickhouseClientConfig.put("url", rdbSideTableInfo.getUrl()) .put("driver_class", CLICKHOUSE_DRIVER) - .put("max_pool_size", DEFAULT_MAX_DB_CONN_POOL_SIZE) + .put("max_pool_size", rdbSideTableInfo.getAsyncPoolSize()) .put("user", rdbSideTableInfo.getUserName()) .put("password", rdbSideTableInfo.getPassword()) .put("provider_class", DT_PROVIDER_CLASS); System.setProperty("vertx.disableFileCPResolving", "true"); VertxOptions vo = new VertxOptions(); - vo.setEventLoopPoolSize(DEFAULT_VERTX_EVENT_LOOP_POOL_SIZE); - vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); + vo.setEventLoopPoolSize(rdbSideTableInfo.getAsyncPoolSize()); + vo.setWorkerPoolSize(rdbSideTableInfo.getAsyncPoolSize()); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); setRdbSqlClient(JDBCClient.createNonShared(vertx, clickhouseClientConfig)); diff --git a/core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java b/core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java index cacba74fd..0fe177f96 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java @@ -55,6 +55,8 @@ public abstract class AbstractSideTableInfo extends AbstractTableInfo implements public static final String ASYNC_TIMEOUT_NUM_KEY = "asyncTimeoutNum"; + public static final String ASYNC_REQ_POOL_KEY = "asyncPoolSize"; + private String cacheType = "none";//None or LRU or ALL private int cacheSize = 10000; @@ -65,6 +67,11 @@ public abstract class AbstractSideTableInfo extends AbstractTableInfo implements private int asyncTimeout=10000; + /** + * async operator req outside conn pool size, egg rdb conn pool size + */ + private int asyncPoolSize = 0; + private int asyncTimeoutNumLimit = Integer.MAX_VALUE; private boolean partitionedJoin = false; @@ -156,4 +163,11 @@ public void setAsyncTimeoutNumLimit(int asyncTimeoutNumLimit) { this.asyncTimeoutNumLimit = asyncTimeoutNumLimit; } + public int getAsyncPoolSize() { + return asyncPoolSize; + } + + public void setAsyncPoolSize(int asyncPoolSize) { + this.asyncPoolSize = asyncPoolSize; + } } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/BaseAsyncReqRow.java b/core/src/main/java/com/dtstack/flink/sql/side/BaseAsyncReqRow.java index a0deb6b74..e5bce4437 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/BaseAsyncReqRow.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/BaseAsyncReqRow.java @@ -30,14 +30,12 @@ import org.apache.flink.metrics.Counter; import org.apache.flink.streaming.api.functions.async.ResultFuture; import org.apache.flink.streaming.api.functions.async.RichAsyncFunction; -import org.apache.flink.streaming.api.operators.async.queue.StreamRecordQueueEntry; import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.types.Row; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Collections; -import java.util.concurrent.TimeoutException; /** * All interfaces inherit naming rules: type + "AsyncReqRow" such as == "MysqlAsyncReqRow diff --git a/core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java b/core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java index 39b72c22f..4973a891e 100644 --- a/core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java +++ 
b/core/src/main/java/com/dtstack/flink/sql/table/AbstractSideTableParser.java @@ -23,6 +23,8 @@ import com.dtstack.flink.sql.enums.ECacheType; import com.dtstack.flink.sql.side.AbstractSideTableInfo; import com.dtstack.flink.sql.util.MathUtil; +import org.apache.flink.util.Preconditions; + import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -114,6 +116,13 @@ protected void parseCacheProp(AbstractSideTableInfo sideTableInfo, Map 0 && asyncPoolSize <= 20, "asyncPoolSize size limit (0,20]"); + sideTableInfo.setAsyncPoolSize(asyncPoolSize); + } + } } } diff --git a/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java b/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java index 240bab9d1..8e7275977 100644 --- a/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java +++ b/db2/db2-side/db2-async-side/src/main/java/com/dtstack/flink/sql/side/db2/Db2AsyncReqRow.java @@ -59,7 +59,7 @@ public void open(Configuration parameters) throws Exception { RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); db2lientConfig.put("url", rdbSideTableInfo.getUrl()) .put("driver_class", DB2_DRIVER) - .put("max_pool_size", DEFAULT_MAX_DB_CONN_POOL_SIZE) + .put("max_pool_size", rdbSideTableInfo.getAsyncPoolSize()) .put("user", rdbSideTableInfo.getUserName()) .put("password", rdbSideTableInfo.getPassword()) .put("provider_class", DT_PROVIDER_CLASS) @@ -71,7 +71,7 @@ public void open(Configuration parameters) throws Exception { VertxOptions vo = new VertxOptions(); vo.setEventLoopPoolSize(DEFAULT_VERTX_EVENT_LOOP_POOL_SIZE); - vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); + vo.setWorkerPoolSize(rdbSideTableInfo.getAsyncPoolSize()); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); setRdbSqlClient(JDBCClient.createNonShared(vertx, db2lientConfig)); diff --git a/hbase/pom.xml b/hbase/pom.xml index f64ffab1b..835457e29 100644 --- a/hbase/pom.xml +++ b/hbase/pom.xml @@ -29,13 +29,6 @@ 1.0-SNAPSHOT provided - - jdk.tools - jdk.tools - 1.6 - system - ${JAVA_HOME}/lib/tools.jar - org.apache.hbase diff --git a/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java b/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java index 77d3f3007..7ab603650 100644 --- a/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java +++ b/impala/impala-side/impala-async-side/src/main/java/com/dtstack/flink/sql/side/impala/ImpalaAsyncReqRow.java @@ -57,31 +57,28 @@ public ImpalaAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { @@ -61,7 +56,7 @@ public void open(Configuration parameters) throws Exception { RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); mysqlClientConfig.put("url", rdbSideTableInfo.getUrl()) .put("driver_class", MYSQL_DRIVER) - .put("max_pool_size", DEFAULT_MAX_DB_CONN_POOL_SIZE) + .put("max_pool_size", rdbSideTableInfo.getAsyncPoolSize()) .put("user", rdbSideTableInfo.getUserName()) .put("password", rdbSideTableInfo.getPassword()) .put("provider_class", DT_PROVIDER_CLASS) @@ -73,7 +68,7 @@ public void open(Configuration parameters) throws Exception { VertxOptions vo = new VertxOptions(); vo.setEventLoopPoolSize(DEFAULT_VERTX_EVENT_LOOP_POOL_SIZE); - 
vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); + vo.setWorkerPoolSize(rdbSideTableInfo.getAsyncPoolSize()); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); setRdbSqlClient(JDBCClient.createNonShared(vertx, mysqlClientConfig)); diff --git a/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java b/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java index c0b37e7ac..938fd870c 100644 --- a/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java +++ b/oracle/oracle-side/oracle-async-side/src/main/java/com/dtstack/flink/sql/side/oracle/OracleAsyncReqRow.java @@ -30,16 +30,11 @@ import io.vertx.ext.jdbc.JDBCClient; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.configuration.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.util.List; public class OracleAsyncReqRow extends RdbAsyncReqRow { - - private static final Logger LOG = LoggerFactory.getLogger(OracleAsyncReqRow.class); - private static final String ORACLE_DRIVER = "oracle.jdbc.driver.OracleDriver"; public OracleAsyncReqRow(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { @@ -53,7 +48,7 @@ public void open(Configuration parameters) throws Exception { RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); oracleClientConfig.put("url", rdbSideTableInfo.getUrl()) .put("driver_class", ORACLE_DRIVER) - .put("max_pool_size", DEFAULT_MAX_DB_CONN_POOL_SIZE) + .put("max_pool_size", rdbSideTableInfo.getAsyncPoolSize()) .put("user", rdbSideTableInfo.getUserName()) .put("password", rdbSideTableInfo.getPassword()) .put("provider_class", DT_PROVIDER_CLASS) @@ -65,7 +60,7 @@ public void open(Configuration parameters) throws Exception { VertxOptions vo = new VertxOptions(); vo.setEventLoopPoolSize(DEFAULT_VERTX_EVENT_LOOP_POOL_SIZE); - vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); + vo.setWorkerPoolSize(rdbSideTableInfo.getAsyncPoolSize()); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); setRdbSqlClient(JDBCClient.createNonShared(vertx, oracleClientConfig)); diff --git a/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java b/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java index 286f17286..13d25b111 100644 --- a/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java +++ b/polardb/polardb-side/polardb-async-side/src/main/java/com/dtstack/flink/sql/side/polardb/PolardbAsyncReqRow.java @@ -55,7 +55,7 @@ public void open(Configuration parameters) throws Exception { RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); mysqlClientConfig.put("url", rdbSideTableInfo.getUrl()) .put("driver_class", POLARDB_DRIVER) - .put("max_pool_size", DEFAULT_MAX_DB_CONN_POOL_SIZE) + .put("max_pool_size", rdbSideTableInfo.getAsyncPoolSize()) .put("user", rdbSideTableInfo.getUserName()) .put("password", rdbSideTableInfo.getPassword()) .put("provider_class", DT_PROVIDER_CLASS) @@ -67,7 +67,7 @@ public void open(Configuration parameters) throws Exception { VertxOptions vo = new VertxOptions(); vo.setEventLoopPoolSize(DEFAULT_VERTX_EVENT_LOOP_POOL_SIZE); - 
vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); + vo.setWorkerPoolSize(rdbSideTableInfo.getAsyncPoolSize()); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); setRdbSqlClient(JDBCClient.createNonShared(vertx, mysqlClientConfig)); diff --git a/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java b/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java index 2a9e0137c..0efc077e0 100644 --- a/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java +++ b/postgresql/postgresql-side/postgresql-async-side/src/main/java/com/dtstack/flink/sql/side/postgresql/PostgresqlAsyncReqRow.java @@ -60,13 +60,13 @@ public void open(Configuration parameters) throws Exception { RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); pgClientConfig.put("url", rdbSideTableInfo.getUrl()) .put("driver_class", POSTGRESQL_DRIVER) - .put("max_pool_size", DEFAULT_MAX_DB_CONN_POOL_SIZE) + .put("max_pool_size", rdbSideTableInfo.getAsyncPoolSize()) .put("user", rdbSideTableInfo.getUserName()) .put("password", rdbSideTableInfo.getPassword()); VertxOptions vo = new VertxOptions(); vo.setEventLoopPoolSize(DEFAULT_VERTX_EVENT_LOOP_POOL_SIZE); - vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); + vo.setWorkerPoolSize(rdbSideTableInfo.getAsyncPoolSize()); Vertx vertx = Vertx.vertx(vo); setRdbSqlClient(JDBCClient.createNonShared(vertx, pgClientConfig)); } diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java index 17daecfb0..8fb35b7eb 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java @@ -19,6 +19,7 @@ package com.dtstack.flink.sql.side.rdb.all; import org.apache.flink.api.common.typeinfo.TypeInformation; +import org.apache.flink.configuration.Configuration; import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo; import org.apache.flink.types.Row; @@ -73,6 +74,18 @@ public AbstractRdbAllReqRow(BaseSideInfo sideInfo) { super(sideInfo); } + @Override + public void open(Configuration parameters) throws Exception { + super.open(parameters); + RdbSideTableInfo tableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); + LOG.info("jdbc url: {} ", tableInfo.getUrl()); + LOG.info("query table name: {}", tableInfo.getTableName()); + LOG.info("all cache type: {}", tableInfo.getCacheType()); + LOG.info("all cache size: {}", tableInfo.getCacheSize()); + LOG.info("all cache timeout: {}", tableInfo.getCacheTimeout()); + } + + @Override protected void initCache() throws SQLException { Map>> newCache = Maps.newConcurrentMap(); diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java index f78c30c30..b4a1c25e2 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java @@ -24,11 +24,13 @@ import com.dtstack.flink.sql.side.CacheMissVal; import com.dtstack.flink.sql.side.BaseSideInfo; import 
com.dtstack.flink.sql.side.cache.CacheObj; +import com.dtstack.flink.sql.side.rdb.table.RdbSideTableInfo; import com.dtstack.flink.sql.side.rdb.util.SwitchUtil; import io.vertx.core.json.JsonArray; import io.vertx.ext.sql.SQLClient; import io.vertx.ext.sql.SQLConnection; import com.google.common.collect.Lists; +import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.functions.async.ResultFuture; import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo; @@ -57,7 +59,9 @@ public class RdbAsyncReqRow extends BaseAsyncReqRow { public final static int DEFAULT_VERTX_WORKER_POOL_SIZE = Runtime.getRuntime().availableProcessors() * 2; - public final static int DEFAULT_MAX_DB_CONN_POOL_SIZE = DEFAULT_VERTX_EVENT_LOOP_POOL_SIZE + DEFAULT_VERTX_WORKER_POOL_SIZE; + public final static int DEFAULT_DB_CONN_POOL_SIZE = DEFAULT_VERTX_EVENT_LOOP_POOL_SIZE + DEFAULT_VERTX_WORKER_POOL_SIZE; + + public final static int MAX_DB_CONN_POOL_SIZE_LIMIT = 20; public final static int DEFAULT_IDLE_CONNECTION_TEST_PEROID = 60; @@ -71,6 +75,27 @@ public class RdbAsyncReqRow extends BaseAsyncReqRow { public RdbAsyncReqRow(BaseSideInfo sideInfo) { super(sideInfo); + init(sideInfo); + } + + protected void init(BaseSideInfo sideInfo) { + RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); + int defaultAsyncPoolSize = Math.min(MAX_DB_CONN_POOL_SIZE_LIMIT, DEFAULT_DB_CONN_POOL_SIZE); + int rdbPoolSize = rdbSideTableInfo.getAsyncPoolSize() > 0 ? rdbSideTableInfo.getAsyncPoolSize() : defaultAsyncPoolSize; + rdbSideTableInfo.setAsyncPoolSize(rdbPoolSize); + } + + @Override + public void open(Configuration parameters) throws Exception { + super.open(parameters); + RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); + LOG.info("use rdb pool size: {}", rdbSideTableInfo.getAsyncPoolSize()); + LOG.info("jdbc url: {} ", rdbSideTableInfo.getUrl()); + LOG.info("query table name: {}", rdbSideTableInfo.getTableName()); + LOG.info("async cache type: {}", rdbSideTableInfo.getCacheType()); + LOG.info("async cache mode: {}", rdbSideTableInfo.getCacheMode()); + LOG.info("async cache capacity: {}", rdbSideTableInfo.getAsyncCapacity()); + LOG.info("async cache timeout: {}", rdbSideTableInfo.getAsyncTimeout()); } @Override diff --git a/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java b/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java index f78061fca..a63d28607 100644 --- a/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java +++ b/sqlserver/sqlserver-side/sqlserver-async-side/src/main/java/com/dtstack/flink/sql/side/sqlserver/SqlserverAsyncReqRow.java @@ -55,7 +55,7 @@ public void open(Configuration parameters) throws Exception { RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); sqlserverClientConfig.put("url", rdbSideTableInfo.getUrl()) .put("driver_class", SQLSERVER_DRIVER) - .put("max_pool_size", DEFAULT_MAX_DB_CONN_POOL_SIZE) + .put("max_pool_size", rdbSideTableInfo.getAsyncPoolSize()) .put("user", rdbSideTableInfo.getUserName()) .put("password", rdbSideTableInfo.getPassword()) .put("provider_class", DT_PROVIDER_CLASS) @@ -68,7 +68,7 @@ public void open(Configuration parameters) throws Exception { VertxOptions vo = new VertxOptions(); 
vo.setEventLoopPoolSize(DEFAULT_VERTX_EVENT_LOOP_POOL_SIZE); - vo.setWorkerPoolSize(DEFAULT_VERTX_WORKER_POOL_SIZE); + vo.setWorkerPoolSize(rdbSideTableInfo.getAsyncPoolSize()); vo.setFileResolverCachingEnabled(false); Vertx vertx = Vertx.vertx(vo); setRdbSqlClient(JDBCClient.createNonShared(vertx, sqlserverClientConfig)); From 229afced132eb5a367914b019daf08e4347e61e7 Mon Sep 17 00:00:00 2001 From: maqi Date: Fri, 13 Mar 2020 11:12:56 +0800 Subject: [PATCH 28/47] connection info --- .../flink/sql/side/AbstractSideTableInfo.java | 15 +++++++++++++++ .../sql/side/rdb/all/AbstractRdbAllReqRow.java | 6 +----- .../flink/sql/side/rdb/async/RdbAsyncReqRow.java | 8 +------- .../sql/side/rdb/table/RdbSideTableInfo.java | 11 +++++++++++ 4 files changed, 28 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java b/core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java index 0fe177f96..7a832d0a1 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/AbstractSideTableInfo.java @@ -170,4 +170,19 @@ public int getAsyncPoolSize() { public void setAsyncPoolSize(int asyncPoolSize) { this.asyncPoolSize = asyncPoolSize; } + + @Override + public String toString() { + return "Cache Info{" + + "cacheType='" + cacheType + '\'' + + ", cacheSize=" + cacheSize + + ", cacheTimeout=" + cacheTimeout + + ", asyncCapacity=" + asyncCapacity + + ", asyncTimeout=" + asyncTimeout + + ", asyncPoolSize=" + asyncPoolSize + + ", asyncTimeoutNumLimit=" + asyncTimeoutNumLimit + + ", partitionedJoin=" + partitionedJoin + + ", cacheMode='" + cacheMode + '\'' + + '}'; + } } diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java index 8fb35b7eb..2e9986742 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/AbstractRdbAllReqRow.java @@ -78,11 +78,7 @@ public AbstractRdbAllReqRow(BaseSideInfo sideInfo) { public void open(Configuration parameters) throws Exception { super.open(parameters); RdbSideTableInfo tableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); - LOG.info("jdbc url: {} ", tableInfo.getUrl()); - LOG.info("query table name: {}", tableInfo.getTableName()); - LOG.info("all cache type: {}", tableInfo.getCacheType()); - LOG.info("all cache size: {}", tableInfo.getCacheSize()); - LOG.info("all cache timeout: {}", tableInfo.getCacheTimeout()); + LOG.info("rdb dim table config info: {} ", tableInfo.toString()); } diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java index b4a1c25e2..356e9d665 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java @@ -89,13 +89,7 @@ protected void init(BaseSideInfo sideInfo) { public void open(Configuration parameters) throws Exception { super.open(parameters); RdbSideTableInfo rdbSideTableInfo = (RdbSideTableInfo) sideInfo.getSideTableInfo(); - LOG.info("use rdb pool size: {}", rdbSideTableInfo.getAsyncPoolSize()); - LOG.info("jdbc url: {} ", rdbSideTableInfo.getUrl()); - LOG.info("query table name: {}", 
rdbSideTableInfo.getTableName()); - LOG.info("async cache type: {}", rdbSideTableInfo.getCacheType()); - LOG.info("async cache mode: {}", rdbSideTableInfo.getCacheMode()); - LOG.info("async cache capacity: {}", rdbSideTableInfo.getAsyncCapacity()); - LOG.info("async cache timeout: {}", rdbSideTableInfo.getAsyncTimeout()); + LOG.info("rdb dim table config info: {} ", rdbSideTableInfo.toString()); } @Override diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideTableInfo.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideTableInfo.java index 506033007..9a4fa3c4f 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideTableInfo.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/table/RdbSideTableInfo.java @@ -99,4 +99,15 @@ public String getPassword() { public void setPassword(String password) { this.password = password; } + + @Override + public String toString() { + String cacheInfo = super.toString(); + String connectionInfo = "Rdb Side Connection Info{" + + "url='" + url + '\'' + + ", tableName='" + tableName + '\'' + + ", schema='" + schema + '\'' + + '}'; + return cacheInfo + " , " + connectionInfo; + } } From b3e9ea6b20efb8994e885fd76042827bdb3bb57b Mon Sep 17 00:00:00 2001 From: dapeng Date: Mon, 16 Mar 2020 20:00:37 +0800 Subject: [PATCH 29/47] =?UTF-8?q?=E6=97=B6=E9=97=B4=E6=A0=BC=E5=BC=8F?= =?UTF-8?q?=E5=8C=96=E5=8E=BB=E9=99=A4=E5=86=97=E4=BD=99=E6=96=B9=E6=B3=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../com/dtstack/flink/sql/util/DateUtil.java | 410 ++++++++---------- .../sql/side/rdb/async/RdbAsyncReqRow.java | 4 +- 2 files changed, 194 insertions(+), 220 deletions(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java index a0f5291e7..2b2dc4715 100644 --- a/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java +++ b/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java @@ -16,29 +16,24 @@ * limitations under the License. 
*/ - + package com.dtstack.flink.sql.util; import java.sql.Timestamp; import java.text.ParseException; import java.text.SimpleDateFormat; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.ZoneOffset; -import java.util.Calendar; -import java.util.Date; -import java.util.Locale; -import java.util.SimpleTimeZone; -import java.util.TimeZone; +import java.time.*; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; +import java.util.*; import java.util.regex.Pattern; import static java.time.format.DateTimeFormatter.ISO_INSTANT; /** - * + * * Reason: TODO ADD REASON(可选) * Date: 2017年03月10日 下午1:16:37 * Company: www.dtstack.com @@ -47,20 +42,16 @@ */ public class DateUtil { - static final String timeZone = "GMT+8"; - static final String datetimeFormat = "yyyy-MM-dd HH:mm:ss"; - static final String dateFormat = "yyyy-MM-dd"; - static final String timeFormat = "HH:mm:ss"; - static final SimpleDateFormat datetimeFormatter = new SimpleDateFormat(datetimeFormat); - static final SimpleDateFormat dateFormatter = new SimpleDateFormat(dateFormat); - static final SimpleDateFormat timeFormatter = new SimpleDateFormat(timeFormat); + static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"); + static final DateTimeFormatter DATE_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd"); + static final DateTimeFormatter TIME_FORMATTER = DateTimeFormatter.ofPattern("HH:mm:ss"); private static final Pattern DATETIME = Pattern.compile("^\\d{4}-(?:0[0-9]|1[0-2])-[0-9]{2}T\\d{2}:\\d{2}:\\d{2}(\\.\\d{3,9})?Z$"); private static final Pattern DATE = Pattern.compile("^\\d{4}-(?:0[0-9]|1[0-2])-[0-9]{2}$"); - private static final int MILLIS_PER_SECOND = 1000; + public static java.sql.Date columnToDate(Object column) { if(column instanceof String) { return new java.sql.Date(stringToDate((String)column).getTime()); @@ -84,23 +75,32 @@ public static Date stringToDate(String strDate) { return null; } try { - return datetimeFormatter.parse(strDate); - } catch (ParseException ignored) { + ; + return localDateTimetoDate(LocalDateTime.parse(strDate, DATE_TIME_FORMATTER)); + } catch (DateTimeParseException ignored) { } try { - return dateFormatter.parse(strDate); - } catch (ParseException ignored) { + return localDateTimetoDate(LocalDate.parse(strDate, DATE_FORMATTER).atStartOfDay()); + } catch (DateTimeParseException ignored) { } try { - return timeFormatter.parse(strDate); - } catch (ParseException ignored) { + return localDateTimetoDate(LocalDateTime.of(LocalDate.now(), LocalTime.parse(strDate, TIME_FORMATTER))); + } catch (DateTimeParseException ignored) { } throw new RuntimeException("can't parse date"); } + public static Date localDateTimetoDate(LocalDateTime localDateTime){ + return Date.from(localDateTime.atZone(ZoneId.systemDefault()).toInstant()); + } + + public static LocalDateTime dateToLocalDateTime(Date date){ + return date.toInstant().atZone(ZoneId.systemDefault()).toLocalDateTime(); + } + /** * * @@ -130,13 +130,13 @@ public static long getTodayStart(long day) { * @return */ public static long getTodayStart(long day,String scope) { - if(scope.equals("MS")){ - return getTodayStart(day)*1000; - }else if(scope.equals("S")){ - return getTodayStart(day); - }else{ - return getTodayStart(day); - } + if("MS".equals(scope)){ + return getTodayStart(day)*1000; + }else if("S".equals(scope)){ + return getTodayStart(day); + }else{ + return getTodayStart(day); + } } /** @@ -168,15 +168,15 @@ 
public static long getNextDayStart(long day) { * @return */ public static long getNextDayStart(long day,String scope) { - if(scope.equals("MS")){ - return getNextDayStart(day)*1000; - }else if(scope.equals("S")){ - return getNextDayStart(day); - }else{ - return getNextDayStart(day); - } + if("MS".equals(scope)){ + return getNextDayStart(day)*1000; + }else if("S".equals(scope)){ + return getNextDayStart(day); + }else{ + return getNextDayStart(day); + } } - + /** * @@ -237,7 +237,7 @@ public static long getWeekFirst(long day) { /** * 根据某个日期时间戳秒值,获取所在周在一年中是第几周. - * + * * @param day * @return */ @@ -257,17 +257,17 @@ public static int getWeekOfYear(long day) { */ public static String getYesterdayByString(String day, String inFormat, String outFormat){ try { - SimpleDateFormat sdf = new SimpleDateFormat(inFormat); - Date date = sdf.parse(day); - Calendar calendar = Calendar.getInstance(); - calendar.setTime(date); - int calendarDay = calendar.get(Calendar.DATE); - calendar.set(Calendar.DATE, calendarDay - 1); - String dayBefore = new SimpleDateFormat(outFormat).format(calendar.getTime()); - return dayBefore; - } catch (ParseException e) { - return null; - } + SimpleDateFormat sdf = new SimpleDateFormat(inFormat); + Date date = sdf.parse(day); + Calendar calendar = Calendar.getInstance(); + calendar.setTime(date); + int calendarDay = calendar.get(Calendar.DATE); + calendar.set(Calendar.DATE, calendarDay - 1); + String dayBefore = new SimpleDateFormat(outFormat).format(calendar.getTime()); + return dayBefore; + } catch (ParseException e) { + return null; + } } /** @@ -288,7 +288,7 @@ public static String getTomorrowByString(String day, String inFormat, String out String dayBefore = new SimpleDateFormat(outFormat).format(calendar.getTime()); return dayBefore; } - + /** * * @param date @@ -320,7 +320,7 @@ public static String get30DaysBeforeByString(String day, String inFormat, String calendar.set(Calendar.DATE, calendarDay - 30); return new SimpleDateFormat(outFormat).format(calendar.getTime()); } - + /** * * @param day @@ -330,14 +330,14 @@ public static String get30DaysBeforeByString(String day, String inFormat, String * @throws ParseException */ public static String get30DaysLaterByString(String day, String inFormat, String outFormat) throws ParseException { - SimpleDateFormat sdf = new SimpleDateFormat(inFormat); - Date date = sdf.parse(day); - Calendar calendar = Calendar.getInstance(); - calendar.setTime(date); - int calendarDay = calendar.get(Calendar.DATE); - calendar.set(Calendar.DATE, calendarDay + 30); - String dayBefore = new SimpleDateFormat(outFormat).format(calendar.getTime()); - return dayBefore; + SimpleDateFormat sdf = new SimpleDateFormat(inFormat); + Date date = sdf.parse(day); + Calendar calendar = Calendar.getInstance(); + calendar.setTime(date); + int calendarDay = calendar.get(Calendar.DATE); + calendar.set(Calendar.DATE, calendarDay + 30); + String dayBefore = new SimpleDateFormat(outFormat).format(calendar.getTime()); + return dayBefore; } @@ -349,7 +349,7 @@ public static String get30DaysLaterByString(String day, String inFormat, String * @return String * @throws ParseException */ - public static String getDateStrTOFormat(String day, String inFormat, String outFormat) throws ParseException { + public static String getDateStrToFormat(String day, String inFormat, String outFormat) throws ParseException { SimpleDateFormat sdf = new SimpleDateFormat(inFormat); Date date = sdf.parse(day); Calendar calendar = Calendar.getInstance(); @@ -357,8 +357,8 @@ public static String 
getDateStrTOFormat(String day, String inFormat, String outF String dayBefore = new SimpleDateFormat(outFormat).format(calendar.getTime()); return dayBefore; } - - public static long getDateMillTOFormat(String day, String inFormat) throws ParseException { + + public static long getDateMillToFormat(String day, String inFormat) throws ParseException { SimpleDateFormat sdf = new SimpleDateFormat(inFormat); Date date = sdf.parse(day); Calendar calendar = Calendar.getInstance(); @@ -474,79 +474,79 @@ public static long getMillByLastWeekDay() { cal.set(Calendar.MILLISECOND, 0); return cal.getTimeInMillis() / 1000; } - - /** - * @return long - */ + + /** + * @return long + */ public static long getMillByDay(int severalDays,String condition) { - int dateT=0; + int dateT=0; Calendar cal = Calendar.getInstance(); - if(condition==null){ - return getMillToDay(cal,dateT); - } - if(condition.equals("-")){ - dateT = (cal.get(Calendar.DATE) - severalDays); - return getMillToDay(cal,dateT); - } - if(condition.equals("+")){ - dateT = (cal.get(Calendar.DATE) + severalDays); - return getMillToDay(cal,dateT); - } - return getMillToDay(cal,dateT); - } - + if(condition==null){ + return getMillToDay(cal,dateT); + } + if("-".equals(condition)){ + dateT = (cal.get(Calendar.DATE) - severalDays); + return getMillToDay(cal,dateT); + } + if("+".equals(condition)){ + dateT = (cal.get(Calendar.DATE) + severalDays); + return getMillToDay(cal,dateT); + } + return getMillToDay(cal,dateT); + } + /** * @return long */ public static long getStampByDay(int severalDays,String condition) { - int dateT=0; - Calendar cal = Calendar.getInstance(); - if(condition==null){ - return getStampToDay(cal,dateT); - } - if(condition.equals("-")){ - dateT = (cal.get(Calendar.DATE) - severalDays); - return getStampToDay(cal,dateT); - } - if(condition.equals("+")){ - dateT = (cal.get(Calendar.DATE) + severalDays); - return getStampToDay(cal,dateT); - } - return getStampToDay(cal,dateT); + int dateT=0; + Calendar cal = Calendar.getInstance(); + if(condition==null){ + return getStampToDay(cal,dateT); + } + if("-".equals(condition)){ + dateT = (cal.get(Calendar.DATE) - severalDays); + return getStampToDay(cal,dateT); + } + if("+".equals(condition)){ + dateT = (cal.get(Calendar.DATE) + severalDays); + return getStampToDay(cal,dateT); + } + return getStampToDay(cal,dateT); } /** * @return long */ public static long getMillByDay(){ - return getMillByDay(0,null); + return getMillByDay(0,null); } - + /** * @param cal Calendar - * @param dateT Integer + * @param dateT Integer * @return long */ public static long getMillToDay(Calendar cal,int dateT){ - if(dateT!=0){ - cal.set(Calendar.DATE, dateT); - } - cal.set(Calendar.HOUR_OF_DAY, 0); - cal.set(Calendar.MINUTE, 0); - cal.set(Calendar.SECOND, 0); - cal.set(Calendar.MILLISECOND, 0); - return cal.getTimeInMillis()/1000; - } - + if(dateT!=0){ + cal.set(Calendar.DATE, dateT); + } + cal.set(Calendar.HOUR_OF_DAY, 0); + cal.set(Calendar.MINUTE, 0); + cal.set(Calendar.SECOND, 0); + cal.set(Calendar.MILLISECOND, 0); + return cal.getTimeInMillis()/1000; + } + /** * @param cal Calendar - * @param dateT Integer + * @param dateT Integer * @return long */ public static long getStampToDay(Calendar cal,int dateT){ - if(dateT!=0){ - cal.set(Calendar.DATE, dateT); - } - return cal.getTimeInMillis(); + if(dateT!=0){ + cal.set(Calendar.DATE, dateT); + } + return cal.getTimeInMillis(); } public static String getToday() { @@ -568,7 +568,7 @@ public static String getDate(long day, String format) { SimpleDateFormat sf = new 
SimpleDateFormat(format); return sf.format(cal.getTime()); } - + /** * * @param date @@ -578,21 +578,21 @@ public static String getDate(Date date, String format) { SimpleDateFormat sf = new SimpleDateFormat(format); return sf.format(date); } - - + + /** * * @param day * @param format * @return long - * @throws ParseException + * @throws ParseException */ public static long stringToLong(String day, String format) throws ParseException { - SimpleDateFormat dateFormat = new SimpleDateFormat(format); - long Date = dateFormat.parse(day).getTime(); - return Date; + SimpleDateFormat dateFormat = new SimpleDateFormat(format); + long date = dateFormat.parse(day).getTime(); + return date; } - + /** * @param day * @param format @@ -600,30 +600,30 @@ public static long stringToLong(String day, String format) throws ParseException * @throws ParseException */ public static Date stringToDate(String day, String format) { - try { - SimpleDateFormat dateFormat = new SimpleDateFormat(format); - Date Date = dateFormat.parse(day); - return Date; - } catch (ParseException e) { - return new Date(); - } + try { + SimpleDateFormat dateFormat = new SimpleDateFormat(format); + Date date = dateFormat.parse(day); + return date; + } catch (ParseException e) { + return new Date(); + } } - - + + /** * long型时间戳转为String型 - * + * * @param day 秒 * @return 格式化后的日期 - * @throws ParseException + * @throws ParseException */ public static String longToString(long day, String format) throws ParseException { - if (("" + day).length() <= 10){ + if (("" + day).length() <= 10){ day=day*1000; } - SimpleDateFormat dateFormat = new SimpleDateFormat(format); - String Date = dateFormat.format(day); - return Date; + SimpleDateFormat dateFormat = new SimpleDateFormat(format); + String date = dateFormat.format(day); + return date; } /** @@ -653,39 +653,39 @@ public static long getMillByNow() { return cal.getTimeInMillis(); } - public static int getWeeksBetweenTwoDates(long startDay, long endDay) { - int week = getWeekOfYear(endDay) - getWeekOfYear(startDay) + 1; - if(week<1){ - week = getWeekOfYear(endDay) + getMaxWeekOfYear(startDay) - getWeekOfYear(startDay) + 1; - } - return week; - } + public static int getWeeksBetweenTwoDates(long startDay, long endDay) { + int week = getWeekOfYear(endDay) - getWeekOfYear(startDay) + 1; + if(week<1){ + week = getWeekOfYear(endDay) + getMaxWeekOfYear(startDay) - getWeekOfYear(startDay) + 1; + } + return week; + } - public static int getMaxWeekOfYear(long startDay) { - Calendar cal = Calendar.getInstance(); + public static int getMaxWeekOfYear(long startDay) { + Calendar cal = Calendar.getInstance(); cal.setTime(new Date(startDay * 1000)); return cal.getMaximum(Calendar.WEEK_OF_YEAR); - } - - public static int getMonthsBetweenTwoDates(long startDay, long endDay) { - int month = DateUtil.getMonth(endDay) - DateUtil.getMonth(startDay) + 1; - if(month<1){ - month = getMonth(endDay) + 12 - getMonth(startDay) +1; - } - return month; - } - - public static Date parseDate(String dateStr, String pattern){ - SimpleDateFormat sdf = new SimpleDateFormat(); - sdf.applyPattern(pattern); - try { - return sdf.parse(dateStr); - } catch (ParseException e) { - return null; - } - } - - /** + } + + public static int getMonthsBetweenTwoDates(long startDay, long endDay) { + int month = DateUtil.getMonth(endDay) - DateUtil.getMonth(startDay) + 1; + if(month<1){ + month = getMonth(endDay) + 12 - getMonth(startDay) +1; + } + return month; + } + + public static Date parseDate(String dateStr, String pattern){ + SimpleDateFormat sdf = 
new SimpleDateFormat(); + sdf.applyPattern(pattern); + try { + return sdf.parse(dateStr); + } catch (ParseException e) { + return null; + } + } + + /** * * @param time Long 时间 * @return long @@ -703,7 +703,7 @@ public static long getMinuteStart(long time) { firstDay = cal.getTimeInMillis() / 1000; return firstDay; } - + /** * @param time Long * @return long @@ -732,24 +732,24 @@ public static Date getDateByLong(long time){ date.setTime(time); return date; } - + public static Date parseDate(String dateStr, String pattern, Locale locale){ - SimpleDateFormat df = new SimpleDateFormat( - pattern, locale); - - df.setTimeZone(new SimpleTimeZone(0, "GMT")); - try { - return df.parse(dateStr); - } catch (ParseException e) { - return null; - } - } - + SimpleDateFormat df = new SimpleDateFormat( + pattern, locale); + + df.setTimeZone(new SimpleTimeZone(0, "GMT")); + try { + return df.parse(dateStr); + } catch (ParseException e) { + return null; + } + } + public static String getDate(Date date, String format, Locale locale) { - SimpleDateFormat df = new SimpleDateFormat( - format, locale); - df.setTimeZone(new SimpleTimeZone(0, "GMT")); + SimpleDateFormat df = new SimpleDateFormat( + format, locale); + df.setTimeZone(new SimpleTimeZone(0, "GMT")); return df.format(date); } @@ -777,59 +777,33 @@ public static java.sql.Timestamp columnToTimestamp(Object column) { } public static String dateToString(Date date) { - return dateFormatter.format(date); + LocalDateTime localDateTime = dateToLocalDateTime(date); + return localDateTime.format(DATE_FORMATTER); } public static String timestampToString(Date date) { - return datetimeFormatter.format(date); + LocalDateTime localDateTime = dateToLocalDateTime(date); + return localDateTime.format(DATE_TIME_FORMATTER); } - public static Timestamp getTimestampFromStr(String timeStr) { if (DATETIME.matcher(timeStr).matches()) { Instant instant = Instant.from(ISO_INSTANT.parse(timeStr)); return new Timestamp(instant.getEpochSecond() * MILLIS_PER_SECOND); - } else { - java.sql.Date date = null; - try { - date = new java.sql.Date(datetimeFormatter.parse(timeStr).getTime()); - } catch (ParseException e) { - throw new RuntimeException("getTimestampFromStr error data is " + timeStr); - } - return new Timestamp(date.getTime()); } + return new Timestamp(stringToDate(timeStr).getTime()); } public static java.sql.Date getDateFromStr(String dateStr) { - // 2020-01-01 format if (DATE.matcher(dateStr).matches()) { - // convert from local date to instant Instant instant = LocalDate.parse(dateStr).atTime(LocalTime.of(0, 0, 0, 0)).toInstant(ZoneOffset.UTC); - // calculate the timezone offset in millis int offset = TimeZone.getDefault().getOffset(instant.toEpochMilli()); - // need to remove the offset since time has no TZ component return new java.sql.Date(instant.toEpochMilli() - offset); } else if (DATETIME.matcher(dateStr).matches()) { - // 2020-01-01T12:12:12Z format Instant instant = Instant.from(ISO_INSTANT.parse(dateStr)); return new java.sql.Date(instant.toEpochMilli()); - } else { - try { - // 2020-01-01 12:12:12.0 format - return new java.sql.Date(datetimeFormatter.parse(dateStr).getTime()); - } catch (ParseException e) { - throw new RuntimeException("String convert to Date fail."); - } } + return new java.sql.Date(stringToDate(dateStr).getTime()); } - - - public static String getStringFromTimestamp(Timestamp timestamp) { - return datetimeFormatter.format(timestamp); - } - - public static String getStringFromDate(java.sql.Date date) { - return dateFormatter.format(date); - } - 
+ } diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java index cda1968fc..f48e6e296 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/async/RdbAsyncReqRow.java @@ -178,9 +178,9 @@ private Object convertDataType(Object val) { } else if (val instanceof Instant) { } else if (val instanceof Timestamp) { - val = DateUtil.getStringFromTimestamp((Timestamp) val); + val = DateUtil.timestampToString((Timestamp) val); } else if (val instanceof java.util.Date) { - val = DateUtil.getStringFromDate((java.sql.Date) val); + val = DateUtil.dateToString((java.util.Date)val); } else { val = val.toString(); } From 14446f4dd844b1bb9da6c412e3556e135bafcba4 Mon Sep 17 00:00:00 2001 From: maqi Date: Tue, 17 Mar 2020 16:22:07 +0800 Subject: [PATCH 30/47] remove hbase jdk tools --- hbase/pom.xml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/hbase/pom.xml b/hbase/pom.xml index f64ffab1b..835457e29 100644 --- a/hbase/pom.xml +++ b/hbase/pom.xml @@ -29,13 +29,6 @@ 1.0-SNAPSHOT provided - - jdk.tools - jdk.tools - 1.6 - system - ${JAVA_HOME}/lib/tools.jar - org.apache.hbase From ec2b84e7e1fa0c54aa7602923768b86671ab79b8 Mon Sep 17 00:00:00 2001 From: maqi Date: Tue, 17 Mar 2020 20:49:15 +0800 Subject: [PATCH 31/47] kafk update mode --- .../format/SerializationMetricWrapper.java | 11 +- .../kafka/AbstractKafkaProducerFactory.java | 31 +- .../CustomerKeyedSerializationSchema.java | 35 +- .../AvroCRowSerializationSchema.java | 346 +++++++++++++++++ .../CsvCRowSerializationSchema.java | 349 ++++++++++++++++++ .../JsonCRowSerializationSchema.java | 234 ++++++++++++ .../sql/sink/kafka/table/KafkaSinkParser.java | 2 + .../sink/kafka/table/KafkaSinkTableInfo.java | 13 +- .../KafkaDeserializationMetricWrapper.java | 10 +- .../flink/sql/sink/kafka/KafkaProducer.java | 6 +- .../sql/sink/kafka/KafkaProducerFactory.java | 4 +- .../flink/sql/sink/kafka/KafkaSink.java | 18 +- .../flink/sql/sink/kafka/KafkaProducer09.java | 5 +- .../sink/kafka/KafkaProducer09Factory.java | 3 +- .../flink/sql/sink/kafka/KafkaSink.java | 21 +- .../sql/sink/kafka/KafkaProducer010.java | 6 +- .../sink/kafka/KafkaProducer010Factory.java | 3 +- .../flink/sql/sink/kafka/KafkaSink.java | 21 +- .../sql/sink/kafka/KafkaProducer011.java | 6 +- .../sink/kafka/KafkaProducer011Factory.java | 3 +- .../flink/sql/sink/kafka/KafkaSink.java | 20 +- .../source/kafka/KafkaConsumer011Factory.java | 2 +- .../flink/sql/source/kafka/KafkaSource.java | 4 +- 23 files changed, 1067 insertions(+), 86 deletions(-) create mode 100644 kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/AvroCRowSerializationSchema.java create mode 100644 kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/CsvCRowSerializationSchema.java create mode 100644 kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/JsonCRowSerializationSchema.java diff --git a/core/src/main/java/com/dtstack/flink/sql/format/SerializationMetricWrapper.java b/core/src/main/java/com/dtstack/flink/sql/format/SerializationMetricWrapper.java index 3a5af18b1..8802198a0 100644 --- a/core/src/main/java/com/dtstack/flink/sql/format/SerializationMetricWrapper.java +++ b/core/src/main/java/com/dtstack/flink/sql/format/SerializationMetricWrapper.java @@ -24,6 +24,7 @@ import 
org.apache.flink.metrics.Counter; import org.apache.flink.metrics.Meter; import org.apache.flink.metrics.MeterView; +import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.types.Row; @@ -34,11 +35,11 @@ * author: toutian * create: 2019/12/24 */ -public class SerializationMetricWrapper implements SerializationSchema { +public class SerializationMetricWrapper implements SerializationSchema { private static final long serialVersionUID = 1L; - private SerializationSchema serializationSchema; + private SerializationSchema serializationSchema; private transient RuntimeContext runtimeContext; @@ -47,7 +48,7 @@ public class SerializationMetricWrapper implements SerializationSchema { protected transient Meter dtNumRecordsOutRate; - public SerializationMetricWrapper(SerializationSchema serializationSchema) { + public SerializationMetricWrapper(SerializationSchema serializationSchema) { this.serializationSchema = serializationSchema; } @@ -57,7 +58,7 @@ public void initMetric() { } @Override - public byte[] serialize(Row element) { + public byte[] serialize(CRow element) { beforeSerialize(); byte[] row = serializationSchema.serialize(element); afterSerialize(); @@ -79,7 +80,7 @@ public void setRuntimeContext(RuntimeContext runtimeContext) { this.runtimeContext = runtimeContext; } - public SerializationSchema getSerializationSchema() { + public SerializationSchema getSerializationSchema() { return serializationSchema; } diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java index 88c2ca939..ebd313b29 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java @@ -19,15 +19,18 @@ import com.dtstack.flink.sql.format.FormatType; import com.dtstack.flink.sql.format.SerializationMetricWrapper; +import com.dtstack.flink.sql.sink.kafka.serialization.AvroCRowSerializationSchema; +import com.dtstack.flink.sql.sink.kafka.serialization.CsvCRowSerializationSchema; +import com.dtstack.flink.sql.sink.kafka.serialization.JsonCRowSerializationSchema; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.serialization.SerializationSchema; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.formats.avro.AvroRowSerializationSchema; import org.apache.flink.formats.csv.CsvRowSerializationSchema; -import org.apache.flink.formats.json.JsonRowSerializationSchema; import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; +import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.types.Row; import java.util.Optional; @@ -51,42 +54,36 @@ public abstract class AbstractKafkaProducerFactory { * @param partitioner * @return */ - public abstract RichSinkFunction createKafkaProducer(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation, Properties properties, Optional> partitioner, String[] partitionKeys); + public abstract RichSinkFunction createKafkaProducer(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation, Properties properties, Optional> partitioner, String[] partitionKeys); - protected 
SerializationMetricWrapper createSerializationMetricWrapper(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation) { - return new SerializationMetricWrapper(createSerializationSchema(kafkaSinkTableInfo, typeInformation)); + protected SerializationMetricWrapper createSerializationMetricWrapper(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation) { + SerializationSchema serializationSchema = createSerializationSchema(kafkaSinkTableInfo, typeInformation); + return new SerializationMetricWrapper(serializationSchema); } - private SerializationSchema createSerializationSchema(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation) { - SerializationSchema serializationSchema = null; + private SerializationSchema createSerializationSchema(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation) { + SerializationSchema serializationSchema = null; if (FormatType.JSON.name().equalsIgnoreCase(kafkaSinkTableInfo.getSinkDataType())) { - if (StringUtils.isNotBlank(kafkaSinkTableInfo.getSchemaString())) { - serializationSchema = new JsonRowSerializationSchema(kafkaSinkTableInfo.getSchemaString()); + serializationSchema = new JsonCRowSerializationSchema(kafkaSinkTableInfo.getSchemaString(), kafkaSinkTableInfo.getUpdateMode()); } else if (typeInformation != null && typeInformation.getArity() != 0) { - serializationSchema = new JsonRowSerializationSchema(typeInformation); + serializationSchema = new JsonCRowSerializationSchema(typeInformation, kafkaSinkTableInfo.getUpdateMode()); } else { throw new IllegalArgumentException("sinkDataType:" + FormatType.JSON.name() + " must set schemaString(JSON Schema)or TypeInformation"); } - } else if (FormatType.CSV.name().equalsIgnoreCase(kafkaSinkTableInfo.getSinkDataType())) { - if (StringUtils.isBlank(kafkaSinkTableInfo.getFieldDelimiter())) { throw new IllegalArgumentException("sinkDataType:" + FormatType.CSV.name() + " must set fieldDelimiter"); } - final CsvRowSerializationSchema.Builder serSchemaBuilder = new CsvRowSerializationSchema.Builder(typeInformation); + final CsvCRowSerializationSchema.Builder serSchemaBuilder = new CsvCRowSerializationSchema.Builder(typeInformation); serSchemaBuilder.setFieldDelimiter(kafkaSinkTableInfo.getFieldDelimiter().toCharArray()[0]); serializationSchema = serSchemaBuilder.build(); - } else if (FormatType.AVRO.name().equalsIgnoreCase(kafkaSinkTableInfo.getSinkDataType())) { - if (StringUtils.isBlank(kafkaSinkTableInfo.getSchemaString())) { throw new IllegalArgumentException("sinkDataType:" + FormatType.AVRO.name() + " must set schemaString"); } - - serializationSchema = new AvroRowSerializationSchema(kafkaSinkTableInfo.getSchemaString()); - + serializationSchema = new AvroCRowSerializationSchema(kafkaSinkTableInfo.getSchemaString(),kafkaSinkTableInfo.getUpdateMode()); } if (null == serializationSchema) { diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerKeyedSerializationSchema.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerKeyedSerializationSchema.java index 498766564..cde8d1b1d 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerKeyedSerializationSchema.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerKeyedSerializationSchema.java @@ -2,18 +2,20 @@ import com.dtstack.flink.sql.format.SerializationMetricWrapper; +import com.dtstack.flink.sql.sink.kafka.serialization.JsonCRowSerializationSchema; 
import org.apache.flink.api.common.serialization.SerializationSchema; import org.apache.flink.formats.json.JsonRowSerializationSchema; import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper; import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema; +import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.types.Row; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.concurrent.atomic.AtomicLong; -public class CustomerKeyedSerializationSchema implements KeyedSerializationSchema { +public class CustomerKeyedSerializationSchema implements KeyedSerializationSchema { private static final Logger LOG = LoggerFactory.getLogger(CustomerKeyedSerializationSchema.class); @@ -30,38 +32,41 @@ public CustomerKeyedSerializationSchema(SerializationMetricWrapper serialization this.mapper = new ObjectMapper(); } - public byte[] serializeKey(Row element) { - if(partitionKeys == null || partitionKeys.length <=0){ + @Override + public byte[] serializeKey(CRow element) { + if (partitionKeys == null || partitionKeys.length <= 0) { return null; - } - SerializationSchema serializationSchema = serializationMetricWrapper.getSerializationSchema(); - if(serializationSchema instanceof JsonRowSerializationSchema){ - return serializeJsonKey((JsonRowSerializationSchema) serializationSchema, element); + } + SerializationSchema serializationSchema = serializationMetricWrapper.getSerializationSchema(); + if (serializationSchema instanceof JsonCRowSerializationSchema) { + return serializeJsonKey((JsonCRowSerializationSchema) serializationSchema, element); } return null; } - public byte[] serializeValue(Row element) { + @Override + public byte[] serializeValue(CRow element) { return this.serializationMetricWrapper.serialize(element); } - public String getTargetTopic(Row element) { + @Override + public String getTargetTopic(CRow element) { return null; } - private byte[] serializeJsonKey(JsonRowSerializationSchema jsonRowSerializationSchema, Row element) { + private byte[] serializeJsonKey(JsonCRowSerializationSchema jsonCRowSerializationSchema, CRow element) { try { - byte[] data = jsonRowSerializationSchema.serialize(element); + byte[] data = jsonCRowSerializationSchema.serialize(element); ObjectNode objectNode = mapper.readValue(data, ObjectNode.class); StringBuilder sb = new StringBuilder(); - for(String key : partitionKeys){ - if(objectNode.has(key)){ + for (String key : partitionKeys) { + if (objectNode.has(key)) { sb.append(objectNode.get(key.trim())); } } return sb.toString().getBytes(); - } catch (Exception e){ - if(COUNTER.getAndIncrement() % 1000 == 0){ + } catch (Exception e) { + if (COUNTER.getAndIncrement() % 1000 == 0) { LOG.error("serializeJsonKey error", e); } } diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/AvroCRowSerializationSchema.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/AvroCRowSerializationSchema.java new file mode 100644 index 000000000..34fa22c99 --- /dev/null +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/AvroCRowSerializationSchema.java @@ -0,0 +1,346 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.dtstack.flink.sql.sink.kafka.serialization; + +import com.dtstack.flink.sql.enums.EUpdateMode; +import org.apache.avro.LogicalType; +import org.apache.avro.LogicalTypes; +import org.apache.avro.Schema; +import org.apache.avro.SchemaParseException; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumWriter; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.generic.IndexedRecord; +import org.apache.avro.io.DatumWriter; +import org.apache.avro.io.Encoder; +import org.apache.avro.io.EncoderFactory; +import org.apache.avro.specific.SpecificData; +import org.apache.avro.specific.SpecificDatumWriter; +import org.apache.avro.specific.SpecificRecord; +import org.apache.avro.util.Utf8; +import org.apache.commons.lang3.StringUtils; +import org.apache.flink.api.common.serialization.SerializationSchema; +import org.apache.flink.formats.avro.AvroRowDeserializationSchema; +import org.apache.flink.formats.avro.typeutils.AvroSchemaConverter; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.types.Row; +import org.apache.flink.util.Preconditions; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TimeZone; + +/** + * Serialization schema that serializes CROW into Avro bytes. + * + *
<p>
Serializes objects that are represented in (nested) Flink rows. It support types that + * are compatible with Flink's Table & SQL API. + * + *
<p>
Note: Changes in this class need to be kept in sync with the corresponding runtime + * class {@link AvroRowDeserializationSchema} and schema converter {@link AvroSchemaConverter}. + * + * @author maqi + */ +public class AvroCRowSerializationSchema implements SerializationSchema { + + /** + * Used for time conversions from SQL types. + */ + private static final TimeZone LOCAL_TZ = TimeZone.getDefault(); + + /** + * Avro record class for serialization. Might be null if record class is not available. + */ + private Class recordClazz; + + /** + * Schema string for deserialization. + */ + private String schemaString; + + /** + * Avro serialization schema. + */ + private transient Schema schema; + + /** + * Writer to serialize Avro record into a byte array. + */ + private transient DatumWriter datumWriter; + + /** + * Output stream to serialize records into byte array. + */ + private transient ByteArrayOutputStream arrayOutputStream; + + /** + * Low-level class for serialization of Avro values. + */ + private transient Encoder encoder; + + private String updateMode; + + private final String retractKey = "retract"; + + /** + * Creates an Avro serialization schema for the given specific record class. + * + * @param recordClazz Avro record class used to serialize Flink's row to Avro's record + */ + public AvroCRowSerializationSchema(Class recordClazz) { + Preconditions.checkNotNull(recordClazz, "Avro record class must not be null."); + this.recordClazz = recordClazz; + this.schema = SpecificData.get().getSchema(recordClazz); + this.schemaString = schema.toString(); + this.datumWriter = new SpecificDatumWriter<>(schema); + this.arrayOutputStream = new ByteArrayOutputStream(); + this.encoder = EncoderFactory.get().binaryEncoder(arrayOutputStream, null); + } + + /** + * Creates an Avro serialization schema for the given Avro schema string. 
+ * + * @param avroSchemaString Avro schema string used to serialize Flink's row to Avro's record + */ + public AvroCRowSerializationSchema(String avroSchemaString,String updateMode) { + Preconditions.checkNotNull(avroSchemaString, "Avro schema must not be null."); + this.recordClazz = null; + this.schemaString = avroSchemaString; + try { + this.schema = new Schema.Parser().parse(avroSchemaString); + } catch (SchemaParseException e) { + throw new IllegalArgumentException("Could not parse Avro schema string.", e); + } + this.datumWriter = new GenericDatumWriter<>(schema); + this.arrayOutputStream = new ByteArrayOutputStream(); + this.encoder = EncoderFactory.get().binaryEncoder(arrayOutputStream, null); + this.updateMode = updateMode; + } + + @Override + public byte[] serialize(CRow crow) { + try { + Row row = crow.row(); + boolean change = crow.change(); + + // convert to record + final GenericRecord record = convertRowToAvroRecord(schema, row); + arrayOutputStream.reset(); + if (StringUtils.equalsIgnoreCase(updateMode, EUpdateMode.UPSERT.name())) { + record.put(retractKey, change); + } + datumWriter.write(record, encoder); + encoder.flush(); + return arrayOutputStream.toByteArray(); + } catch (Exception e) { + throw new RuntimeException("Failed to serialize row.", e); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final AvroCRowSerializationSchema that = (AvroCRowSerializationSchema) o; + return Objects.equals(recordClazz, that.recordClazz) && Objects.equals(schemaString, that.schemaString); + } + + @Override + public int hashCode() { + return Objects.hash(recordClazz, schemaString); + } + + // -------------------------------------------------------------------------------------------- + + private GenericRecord convertRowToAvroRecord(Schema schema, Row row) { + final List fields = schema.getFields(); + final int length = fields.size(); + final GenericRecord record = new GenericData.Record(schema); + for (int i = 0; i < length; i++) { + final Schema.Field field = fields.get(i); + record.put(i, convertFlinkType(field.schema(), row.getField(i))); + } + return record; + } + + private Object convertFlinkType(Schema schema, Object object) { + if (object == null) { + return null; + } + switch (schema.getType()) { + case RECORD: + if (object instanceof Row) { + return convertRowToAvroRecord(schema, (Row) object); + } + throw new IllegalStateException("Row expected but was: " + object.getClass()); + case ENUM: + return new GenericData.EnumSymbol(schema, object.toString()); + case ARRAY: + final Schema elementSchema = schema.getElementType(); + final Object[] array = (Object[]) object; + final GenericData.Array convertedArray = new GenericData.Array<>(array.length, schema); + for (Object element : array) { + convertedArray.add(convertFlinkType(elementSchema, element)); + } + return convertedArray; + case MAP: + final Map map = (Map) object; + final Map convertedMap = new HashMap<>(); + for (Map.Entry entry : map.entrySet()) { + convertedMap.put( + new Utf8(entry.getKey().toString()), + convertFlinkType(schema.getValueType(), entry.getValue())); + } + return convertedMap; + case UNION: + final List types = schema.getTypes(); + final int size = types.size(); + final Schema actualSchema; + if (size == 2 && types.get(0).getType() == Schema.Type.NULL) { + actualSchema = types.get(1); + } else if (size == 2 && types.get(1).getType() == Schema.Type.NULL) { + actualSchema = types.get(0); 
+ } else if (size == 1) { + actualSchema = types.get(0); + } else { + // generic type + return object; + } + return convertFlinkType(actualSchema, object); + case FIXED: + // check for logical type + if (object instanceof BigDecimal) { + return new GenericData.Fixed( + schema, + convertFromDecimal(schema, (BigDecimal) object)); + } + return new GenericData.Fixed(schema, (byte[]) object); + case STRING: + return new Utf8(object.toString()); + case BYTES: + // check for logical type + if (object instanceof BigDecimal) { + return ByteBuffer.wrap(convertFromDecimal(schema, (BigDecimal) object)); + } + return ByteBuffer.wrap((byte[]) object); + case INT: + // check for logical types + if (object instanceof Date) { + return convertFromDate(schema, (Date) object); + } else if (object instanceof Time) { + return convertFromTime(schema, (Time) object); + } + return object; + case LONG: + // check for logical type + if (object instanceof Timestamp) { + return convertFromTimestamp(schema, (Timestamp) object); + } + return object; + case FLOAT: + case DOUBLE: + case BOOLEAN: + return object; + } + throw new RuntimeException("Unsupported Avro type:" + schema); + } + + private byte[] convertFromDecimal(Schema schema, BigDecimal decimal) { + final LogicalType logicalType = schema.getLogicalType(); + if (logicalType instanceof LogicalTypes.Decimal) { + final LogicalTypes.Decimal decimalType = (LogicalTypes.Decimal) logicalType; + // rescale to target type + final BigDecimal rescaled = decimal.setScale(decimalType.getScale(), BigDecimal.ROUND_UNNECESSARY); + // byte array must contain the two's-complement representation of the + // unscaled integer value in big-endian byte order + return decimal.unscaledValue().toByteArray(); + } else { + throw new RuntimeException("Unsupported decimal type."); + } + } + + private int convertFromDate(Schema schema, Date date) { + final LogicalType logicalType = schema.getLogicalType(); + if (logicalType == LogicalTypes.date()) { + // adopted from Apache Calcite + final long time = date.getTime(); + final long converted = time + (long) LOCAL_TZ.getOffset(time); + return (int) (converted / 86400000L); + } else { + throw new RuntimeException("Unsupported date type."); + } + } + + private int convertFromTime(Schema schema, Time date) { + final LogicalType logicalType = schema.getLogicalType(); + if (logicalType == LogicalTypes.timeMillis()) { + // adopted from Apache Calcite + final long time = date.getTime(); + final long converted = time + (long) LOCAL_TZ.getOffset(time); + return (int) (converted % 86400000L); + } else { + throw new RuntimeException("Unsupported time type."); + } + } + + private long convertFromTimestamp(Schema schema, Timestamp date) { + final LogicalType logicalType = schema.getLogicalType(); + if (logicalType == LogicalTypes.timestampMillis()) { + // adopted from Apache Calcite + final long time = date.getTime(); + return time + (long) LOCAL_TZ.getOffset(time); + } else { + throw new RuntimeException("Unsupported timestamp type."); + } + } + + private void writeObject(ObjectOutputStream outputStream) throws IOException { + outputStream.writeObject(recordClazz); + outputStream.writeObject(schemaString); // support for null + } + + @SuppressWarnings("unchecked") + private void readObject(ObjectInputStream inputStream) throws ClassNotFoundException, IOException { + recordClazz = (Class) inputStream.readObject(); + schemaString = (String) inputStream.readObject(); + if (recordClazz != null) { + schema = SpecificData.get().getSchema(recordClazz); + } else { + 
schema = new Schema.Parser().parse(schemaString); + } + datumWriter = new SpecificDatumWriter<>(schema); + arrayOutputStream = new ByteArrayOutputStream(); + encoder = EncoderFactory.get().binaryEncoder(arrayOutputStream, null); + } +} diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/CsvCRowSerializationSchema.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/CsvCRowSerializationSchema.java new file mode 100644 index 000000000..903395f9d --- /dev/null +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/CsvCRowSerializationSchema.java @@ -0,0 +1,349 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.dtstack.flink.sql.sink.kafka.serialization; + +import com.dtstack.flink.sql.enums.EUpdateMode; +import org.apache.commons.lang3.StringUtils; +import org.apache.flink.annotation.PublicEvolving; +import org.apache.flink.api.common.serialization.SerializationSchema; +import org.apache.flink.api.common.typeinfo.BasicArrayTypeInfo; +import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo; +import org.apache.flink.api.common.typeinfo.TypeInformation; +import org.apache.flink.api.common.typeinfo.Types; +import org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo; +import org.apache.flink.api.java.typeutils.RowTypeInfo; +import org.apache.flink.formats.csv.CsvRowDeserializationSchema; +import org.apache.flink.formats.csv.CsvRowSchemaConverter; +import org.apache.flink.formats.csv.CsvRowSerializationSchema; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectWriter; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ArrayNode; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ContainerNode; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.csv.CsvMapper; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.csv.CsvSchema; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.runtime.types.CRowTypeInfo; +import org.apache.flink.types.Row; +import org.apache.flink.util.Preconditions; + +import java.io.Serializable; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.Arrays; +import java.util.Objects; + +/** + * Serialization schema that serializes an object of Flink types into a CSV bytes. + * + *
<p>
Serializes the input row into a {@link ObjectNode} and + * converts it into byte[]. + * + *
<p>
Result byte[] messages can be deserialized using {@link CsvRowDeserializationSchema}. + */ +@PublicEvolving +public final class CsvCRowSerializationSchema implements SerializationSchema { + + private static final long serialVersionUID = 2098447220136965L; + + /** Type information describing the input CSV data. */ + private final RowTypeInfo typeInfo; + + /** Runtime instance that performs the actual work. */ + private final RuntimeConverter runtimeConverter; + + /** CsvMapper used to write {@link JsonNode} into bytes. */ + private final CsvMapper csvMapper; + + /** Schema describing the input CSV data. */ + private final CsvSchema csvSchema; + + /** Object writer used to write rows. It is configured by {@link CsvSchema}. */ + private final ObjectWriter objectWriter; + + /** Reusable object node. */ + private transient ObjectNode root; + + private String updateMode; + + private final String retractKey = "retract"; + + private CsvCRowSerializationSchema( + RowTypeInfo typeInfo, + CsvSchema csvSchema) { + this.typeInfo = typeInfo; + this.runtimeConverter = createRowRuntimeConverter(typeInfo, true); + this.csvMapper = new CsvMapper(); + this.csvSchema = csvSchema; + this.objectWriter = csvMapper.writer(csvSchema); + } + + /** + * A builder for creating a {@link CsvRowSerializationSchema}. + */ + @PublicEvolving + public static class Builder { + + private final RowTypeInfo typeInfo; + private CsvSchema csvSchema; + + /** + * Creates a {@link CsvRowSerializationSchema} expecting the given {@link TypeInformation}. + * + * @param typeInfo type information used to create schema. + */ + public Builder(TypeInformation typeInfo) { + Preconditions.checkNotNull(typeInfo, "Type information must not be null."); + + if (!(typeInfo instanceof CRowTypeInfo)) { + throw new IllegalArgumentException("Row type information expected."); + } + RowTypeInfo rowTypeInfo = ((CRowTypeInfo) typeInfo).rowType(); + this.typeInfo = rowTypeInfo; + this.csvSchema = CsvRowSchemaConverter.convert(rowTypeInfo); + } + + public Builder setFieldDelimiter(char c) { + this.csvSchema = this.csvSchema.rebuild().setColumnSeparator(c).build(); + return this; + } + + public Builder setLineDelimiter(String delimiter) { + Preconditions.checkNotNull(delimiter, "Delimiter must not be null."); + if (!delimiter.equals("\n") && !delimiter.equals("\r") && !delimiter.equals("\r\n")) { + throw new IllegalArgumentException( + "Unsupported new line delimiter. 
Only \\n, \\r, or \\r\\n are supported."); + } + this.csvSchema = this.csvSchema.rebuild().setLineSeparator(delimiter).build(); + return this; + } + + public Builder setArrayElementDelimiter(String delimiter) { + Preconditions.checkNotNull(delimiter, "Delimiter must not be null."); + this.csvSchema = this.csvSchema.rebuild().setArrayElementSeparator(delimiter).build(); + return this; + } + + public Builder setQuoteCharacter(char c) { + this.csvSchema = this.csvSchema.rebuild().setQuoteChar(c).build(); + return this; + } + + public Builder setEscapeCharacter(char c) { + this.csvSchema = this.csvSchema.rebuild().setEscapeChar(c).build(); + return this; + } + + public Builder setNullLiteral(String s) { + this.csvSchema = this.csvSchema.rebuild().setNullValue(s).build(); + return this; + } + + public CsvCRowSerializationSchema build() { + return new CsvCRowSerializationSchema( + typeInfo, + csvSchema); + } + } + + @Override + public byte[] serialize(CRow crow) { + Row row = crow.row(); + boolean change = crow.change(); + if (root == null) { + root = csvMapper.createObjectNode(); + } + try { + runtimeConverter.convert(csvMapper, root, row); + if (StringUtils.equalsIgnoreCase(updateMode, EUpdateMode.UPSERT.name())) { + root.put(retractKey, change); + } + return objectWriter.writeValueAsBytes(root); + } catch (Throwable t) { + throw new RuntimeException("Could not serialize row '" + row + "'.", t); + } + } + + @Override + public boolean equals(Object o) { + if (o == null || o.getClass() != this.getClass()) { + return false; + } + if (this == o) { + return true; + } + final CsvCRowSerializationSchema that = (CsvCRowSerializationSchema) o; + final CsvSchema otherSchema = that.csvSchema; + + return typeInfo.equals(that.typeInfo) && + csvSchema.getColumnSeparator() == otherSchema.getColumnSeparator() && + Arrays.equals(csvSchema.getLineSeparator(), otherSchema.getLineSeparator()) && + csvSchema.getArrayElementSeparator().equals(otherSchema.getArrayElementSeparator()) && + csvSchema.getQuoteChar() == otherSchema.getQuoteChar() && + csvSchema.getEscapeChar() == otherSchema.getEscapeChar() && + Arrays.equals(csvSchema.getNullValue(), otherSchema.getNullValue()); + } + + @Override + public int hashCode() { + return Objects.hash( + typeInfo, + csvSchema.getColumnSeparator(), + csvSchema.getLineSeparator(), + csvSchema.getArrayElementSeparator(), + csvSchema.getQuoteChar(), + csvSchema.getEscapeChar(), + csvSchema.getNullValue()); + } + + // -------------------------------------------------------------------------------------------- + + private interface RuntimeConverter extends Serializable { + JsonNode convert(CsvMapper csvMapper, ContainerNode container, Object obj); + } + + private static RuntimeConverter createRowRuntimeConverter(RowTypeInfo rowTypeInfo, boolean isTopLevel) { + final TypeInformation[] fieldTypes = rowTypeInfo.getFieldTypes(); + final String[] fieldNames = rowTypeInfo.getFieldNames(); + + final RuntimeConverter[] fieldConverters = createFieldRuntimeConverters(fieldTypes); + + return assembleRowRuntimeConverter(isTopLevel, fieldNames, fieldConverters); + } + + private static RuntimeConverter[] createFieldRuntimeConverters(TypeInformation[] fieldTypes) { + final RuntimeConverter[] fieldConverters = new RuntimeConverter[fieldTypes.length]; + for (int i = 0; i < fieldTypes.length; i++) { + fieldConverters[i] = createNullableRuntimeConverter(fieldTypes[i]); + } + return fieldConverters; + } + + private static RuntimeConverter assembleRowRuntimeConverter( + boolean isTopLevel, + String[] 
fieldNames, + RuntimeConverter[] fieldConverters) { + final int rowArity = fieldNames.length; + // top level reuses the object node container + if (isTopLevel) { + return (csvMapper, container, obj) -> { + final Row row = (Row) obj; + + validateArity(rowArity, row.getArity()); + + final ObjectNode objectNode = (ObjectNode) container; + for (int i = 0; i < rowArity; i++) { + objectNode.set( + fieldNames[i], + fieldConverters[i].convert(csvMapper, container, row.getField(i))); + } + return objectNode; + }; + } else { + return (csvMapper, container, obj) -> { + final Row row = (Row) obj; + + validateArity(rowArity, row.getArity()); + + final ArrayNode arrayNode = csvMapper.createArrayNode(); + for (int i = 0; i < rowArity; i++) { + arrayNode.add(fieldConverters[i].convert(csvMapper, arrayNode, row.getField(i))); + } + return arrayNode; + }; + } + } + + private static RuntimeConverter createNullableRuntimeConverter(TypeInformation info) { + final RuntimeConverter valueConverter = createRuntimeConverter(info); + return (csvMapper, container, obj) -> { + if (obj == null) { + return container.nullNode(); + } + return valueConverter.convert(csvMapper, container, obj); + }; + } + + private static RuntimeConverter createRuntimeConverter(TypeInformation info) { + if (info.equals(Types.VOID)) { + return (csvMapper, container, obj) -> container.nullNode(); + } else if (info.equals(Types.STRING)) { + return (csvMapper, container, obj) -> container.textNode((String) obj); + } else if (info.equals(Types.BOOLEAN)) { + return (csvMapper, container, obj) -> container.booleanNode((Boolean) obj); + } else if (info.equals(Types.BYTE)) { + return (csvMapper, container, obj) -> container.numberNode((Byte) obj); + } else if (info.equals(Types.SHORT)) { + return (csvMapper, container, obj) -> container.numberNode((Short) obj); + } else if (info.equals(Types.INT)) { + return (csvMapper, container, obj) -> container.numberNode((Integer) obj); + } else if (info.equals(Types.LONG)) { + return (csvMapper, container, obj) -> container.numberNode((Long) obj); + } else if (info.equals(Types.FLOAT)) { + return (csvMapper, container, obj) -> container.numberNode((Float) obj); + } else if (info.equals(Types.DOUBLE)) { + return (csvMapper, container, obj) -> container.numberNode((Double) obj); + } else if (info.equals(Types.BIG_DEC)) { + return (csvMapper, container, obj) -> container.numberNode((BigDecimal) obj); + } else if (info.equals(Types.BIG_INT)) { + return (csvMapper, container, obj) -> container.numberNode((BigInteger) obj); + } else if (info.equals(Types.SQL_DATE)) { + return (csvMapper, container, obj) -> container.textNode(obj.toString()); + } else if (info.equals(Types.SQL_TIME)) { + return (csvMapper, container, obj) -> container.textNode(obj.toString()); + } else if (info.equals(Types.SQL_TIMESTAMP)) { + return (csvMapper, container, obj) -> container.textNode(obj.toString()); + } else if (info instanceof RowTypeInfo){ + return createRowRuntimeConverter((RowTypeInfo) info, false); + } else if (info instanceof BasicArrayTypeInfo) { + return createObjectArrayRuntimeConverter(((BasicArrayTypeInfo) info).getComponentInfo()); + } else if (info instanceof ObjectArrayTypeInfo) { + return createObjectArrayRuntimeConverter(((ObjectArrayTypeInfo) info).getComponentInfo()); + } else if (info instanceof PrimitiveArrayTypeInfo && + ((PrimitiveArrayTypeInfo) info).getComponentType() == Types.BYTE) { + return createByteArrayRuntimeConverter(); + } + else { + throw new RuntimeException("Unsupported type information '" + info + 
"'."); + } + } + + private static RuntimeConverter createObjectArrayRuntimeConverter(TypeInformation elementType) { + final RuntimeConverter elementConverter = createNullableRuntimeConverter(elementType); + return (csvMapper, container, obj) -> { + final Object[] array = (Object[]) obj; + final ArrayNode arrayNode = csvMapper.createArrayNode(); + for (Object element : array) { + arrayNode.add(elementConverter.convert(csvMapper, arrayNode, element)); + } + return arrayNode; + }; + } + + private static RuntimeConverter createByteArrayRuntimeConverter() { + return (csvMapper, container, obj) -> container.binaryNode((byte[]) obj); + } + + private static void validateArity(int expected, int actual) { + if (expected != actual) { + throw new RuntimeException("Row length mismatch. " + expected + + " fields expected but was " + actual + "."); + } + } +} diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/JsonCRowSerializationSchema.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/JsonCRowSerializationSchema.java new file mode 100644 index 000000000..bfe801d52 --- /dev/null +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/JsonCRowSerializationSchema.java @@ -0,0 +1,234 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.dtstack.flink.sql.sink.kafka.serialization; + +import com.dtstack.flink.sql.enums.EUpdateMode; +import org.apache.commons.lang3.StringUtils; +import org.apache.flink.api.common.serialization.SerializationSchema; +import org.apache.flink.api.common.typeinfo.BasicArrayTypeInfo; +import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo; +import org.apache.flink.api.common.typeinfo.TypeInformation; +import org.apache.flink.api.common.typeinfo.Types; +import org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo; +import org.apache.flink.api.java.typeutils.RowTypeInfo; +import org.apache.flink.formats.json.JsonRowSchemaConverter; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ArrayNode; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ContainerNode; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.runtime.types.CRowTypeInfo; +import org.apache.flink.types.Row; +import org.apache.flink.util.Preconditions; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.sql.Time; +import java.sql.Timestamp; +import java.text.SimpleDateFormat; +import java.util.Objects; + +/** + * + * Serialization schema that serializes an object of Flink types into a JSON bytes. + * + *

Serializes the input Flink object into a JSON string and + * converts it into byte[]. + * + */ +public class JsonCRowSerializationSchema implements SerializationSchema { + + private static final long serialVersionUID = -2885556750743978636L; + + /** Type information describing the input type. */ + private final TypeInformation typeInfo; + + /** Object mapper that is used to create output JSON objects. */ + private final ObjectMapper mapper = new ObjectMapper(); + + /** Formatter for RFC 3339-compliant string representation of a time value (with UTC timezone, without milliseconds). */ + private SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm:ss'Z'"); + + /** Formatter for RFC 3339-compliant string representation of a time value (with UTC timezone). */ + private SimpleDateFormat timeFormatWithMillis = new SimpleDateFormat("HH:mm:ss.SSS'Z'"); + + /** Formatter for RFC 3339-compliant string representation of a timestamp value (with UTC timezone). */ + private SimpleDateFormat timestampFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); + + /** Reusable object node. */ + private transient ObjectNode node; + + private String updateMode; + + private final String retractKey = "retract"; + + public JsonCRowSerializationSchema(String jsonSchema, String updateMode) { + this(JsonRowSchemaConverter.convert(jsonSchema), updateMode); + } + + /** + * Creates a JSON serialization schema for the given type information. + * + * @param typeInfo The field names of {@link Row} are used to map to JSON properties. + */ + public JsonCRowSerializationSchema(TypeInformation typeInfo, String updateMode) { + Preconditions.checkNotNull(typeInfo, "Type information"); + this.typeInfo = typeInfo; + this.updateMode = updateMode; + } + + + @Override + public byte[] serialize(CRow crow) { + Row row = crow.row(); + boolean change = crow.change(); + if (node == null) { + node = mapper.createObjectNode(); + } + + RowTypeInfo rowTypeInfo = ((CRowTypeInfo) typeInfo).rowType(); + try { + convertRow(node, rowTypeInfo, row); + if (StringUtils.equalsIgnoreCase(updateMode, EUpdateMode.UPSERT.name())) { + node.put(retractKey, change); + } + return mapper.writeValueAsBytes(node); + } catch (Throwable t) { + throw new RuntimeException("Could not serialize row '" + row + "'. 
" + + "Make sure that the schema matches the input.", t); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final JsonCRowSerializationSchema that = (JsonCRowSerializationSchema) o; + return Objects.equals(typeInfo, that.typeInfo); + } + + @Override + public int hashCode() { + return Objects.hash(typeInfo); + } + + // -------------------------------------------------------------------------------------------- + + private ObjectNode convertRow(ObjectNode reuse, RowTypeInfo info, Row row) { + if (reuse == null) { + reuse = mapper.createObjectNode(); + } + final String[] fieldNames = info.getFieldNames(); + + final TypeInformation[] fieldTypes = info.getFieldTypes(); + + // validate the row + if (row.getArity() != fieldNames.length) { + throw new IllegalStateException(String.format( + "Number of elements in the row '%s' is different from number of field names: %d", row, fieldNames.length)); + } + + for (int i = 0; i < fieldNames.length; i++) { + final String name = fieldNames[i]; + + final JsonNode fieldConverted = convert(reuse, reuse.get(name), fieldTypes[i], row.getField(i)); + reuse.set(name, fieldConverted); + } + + return reuse; + } + + private JsonNode convert(ContainerNode container, JsonNode reuse, TypeInformation info, Object object) { + if (info == Types.VOID || object == null) { + return container.nullNode(); + } else if (info == Types.BOOLEAN) { + return container.booleanNode((Boolean) object); + } else if (info == Types.STRING) { + return container.textNode((String) object); + } else if (info == Types.BIG_DEC) { + // convert decimal if necessary + if (object instanceof BigDecimal) { + return container.numberNode((BigDecimal) object); + } + return container.numberNode(BigDecimal.valueOf(((Number) object).doubleValue())); + } else if (info == Types.BIG_INT) { + // convert integer if necessary + if (object instanceof BigInteger) { + return container.numberNode((BigInteger) object); + } + return container.numberNode(BigInteger.valueOf(((Number) object).longValue())); + } else if (info == Types.SQL_DATE) { + return container.textNode(object.toString()); + } else if (info == Types.SQL_TIME) { + final Time time = (Time) object; + // strip milliseconds if possible + if (time.getTime() % 1000 > 0) { + return container.textNode(timeFormatWithMillis.format(time)); + } + return container.textNode(timeFormat.format(time)); + } else if (info == Types.SQL_TIMESTAMP) { + return container.textNode(timestampFormat.format((Timestamp) object)); + } else if (info instanceof RowTypeInfo) { + if (reuse != null && reuse instanceof ObjectNode) { + return convertRow((ObjectNode) reuse, (RowTypeInfo) info, (Row) object); + } else { + return convertRow(null, (RowTypeInfo) info, (Row) object); + } + } else if (info instanceof ObjectArrayTypeInfo) { + if (reuse != null && reuse instanceof ArrayNode) { + return convertObjectArray((ArrayNode) reuse, ((ObjectArrayTypeInfo) info).getComponentInfo(), (Object[]) object); + } else { + return convertObjectArray(null, ((ObjectArrayTypeInfo) info).getComponentInfo(), (Object[]) object); + } + } else if (info instanceof BasicArrayTypeInfo) { + if (reuse != null && reuse instanceof ArrayNode) { + return convertObjectArray((ArrayNode) reuse, ((BasicArrayTypeInfo) info).getComponentInfo(), (Object[]) object); + } else { + return convertObjectArray(null, ((BasicArrayTypeInfo) info).getComponentInfo(), (Object[]) object); + } + } else if (info instanceof 
PrimitiveArrayTypeInfo && ((PrimitiveArrayTypeInfo) info).getComponentType() == Types.BYTE) { + return container.binaryNode((byte[]) object); + } else { + // for types that were specified without JSON schema + // e.g. POJOs + try { + return mapper.valueToTree(object); + } catch (IllegalArgumentException e) { + throw new IllegalStateException("Unsupported type information '" + info + "' for object: " + object, e); + } + } + } + + private ArrayNode convertObjectArray(ArrayNode reuse, TypeInformation info, Object[] array) { + if (reuse == null) { + reuse = mapper.createArrayNode(); + } else { + reuse.removeAll(); + } + + for (Object object : array) { + reuse.add(convert(reuse, null, info, object)); + } + return reuse; + } +} diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java index 7520de512..a42473420 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java @@ -18,6 +18,7 @@ package com.dtstack.flink.sql.sink.kafka.table; +import com.dtstack.flink.sql.enums.EUpdateMode; import com.dtstack.flink.sql.format.FormatType; import com.dtstack.flink.sql.table.AbstractTableParser; import com.dtstack.flink.sql.table.AbstractTableInfo; @@ -51,6 +52,7 @@ public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map kafkaParam = new HashMap(); @@ -58,6 +60,8 @@ public class KafkaSinkTableInfo extends AbstractTargetTableInfo { private String partitionKeys; + private String updateMode; + public void addKafkaParam(String key, String value) { kafkaParam.put(key, value); } @@ -70,7 +74,6 @@ public Set getKafkaParamKeys() { return kafkaParam.keySet(); } - public String getBootstrapServers() { return bootstrapServers; } @@ -103,6 +106,14 @@ public void setFieldDelimiter(String fieldDelimiter) { this.fieldDelimiter = fieldDelimiter; } + public String getUpdateMode() { + return updateMode; + } + + public void setUpdateMode(String updateMode) { + this.updateMode = updateMode; + } + @Override public boolean check() { Preconditions.checkNotNull(getType(), "kafka of type is required"); diff --git a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaDeserializationMetricWrapper.java b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaDeserializationMetricWrapper.java index afa950c5b..f08287cec 100644 --- a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaDeserializationMetricWrapper.java +++ b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaDeserializationMetricWrapper.java @@ -77,7 +77,7 @@ protected void beforeDeserialize() throws IOException { protected void registerPtMetric(AbstractFetcher fetcher) throws Exception { - Field consumerThreadField = fetcher.getClass().getSuperclass().getDeclaredField("consumerThread"); + Field consumerThreadField = getConsumerThreadField(fetcher); consumerThreadField.setAccessible(true); KafkaConsumerThread consumerThread = (KafkaConsumerThread) consumerThreadField.get(fetcher); @@ -115,6 +115,14 @@ public Long getValue() { } } + private Field getConsumerThreadField(AbstractFetcher fetcher) throws NoSuchFieldException { + try { + return fetcher.getClass().getDeclaredField("consumerThread"); + } catch (Exception 
e) { + return fetcher.getClass().getSuperclass().getDeclaredField("consumerThread"); + } + } + public void setFetcher(AbstractFetcher fetcher) { this.fetcher = fetcher; } diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer.java index 71a9cc386..1cbbeafd9 100644 --- a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer.java +++ b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer.java @@ -23,7 +23,7 @@ import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; -import org.apache.flink.types.Row; +import org.apache.flink.table.runtime.types.CRow; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,7 +37,7 @@ * * @author maqi */ -public class KafkaProducer extends FlinkKafkaProducer { +public class KafkaProducer extends FlinkKafkaProducer { private static final Logger LOG = LoggerFactory.getLogger(KafkaProducer.class); @@ -45,7 +45,7 @@ public class KafkaProducer extends FlinkKafkaProducer { private SerializationMetricWrapper serializationMetricWrapper; - public KafkaProducer(String topicId, SerializationSchema serializationSchema, Properties producerConfig, Optional> customPartitioner, String[] parititonKeys) { + public KafkaProducer(String topicId, SerializationSchema serializationSchema, Properties producerConfig, Optional> customPartitioner, String[] parititonKeys) { super(topicId, new CustomerKeyedSerializationSchema((SerializationMetricWrapper)serializationSchema, parititonKeys), producerConfig, customPartitioner); this.serializationMetricWrapper = (SerializationMetricWrapper) serializationSchema; } diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducerFactory.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducerFactory.java index f3a2f40f5..6bf9014df 100644 --- a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducerFactory.java +++ b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducerFactory.java @@ -22,7 +22,7 @@ import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; -import org.apache.flink.types.Row; +import org.apache.flink.table.runtime.types.CRow; import java.util.Optional; import java.util.Properties; @@ -36,7 +36,7 @@ public class KafkaProducerFactory extends AbstractKafkaProducerFactory { @Override - public RichSinkFunction createKafkaProducer(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation, Properties properties, Optional> partitioner, String[] partitionKeys) { + public RichSinkFunction createKafkaProducer(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation, Properties properties, Optional> partitioner, String[] partitionKeys) { return new KafkaProducer(kafkaSinkTableInfo.getTopic(), createSerializationMetricWrapper(kafkaSinkTableInfo, typeInformation), properties, partitioner, partitionKeys); } } diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index 7105bc037..71e938ba5 100644 --- 
a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -31,6 +31,8 @@ import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; import org.apache.flink.table.api.TableSchema; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.runtime.types.CRowTypeInfo; import org.apache.flink.table.sinks.RetractStreamTableSink; import org.apache.flink.table.sinks.TableSink; import org.apache.flink.table.utils.TableConnectorUtils; @@ -56,13 +58,15 @@ public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener< protected Properties properties; - protected FlinkKafkaProducer flinkKafkaProducer; + protected FlinkKafkaProducer flinkKafkaProducer; + protected CRowTypeInfo typeInformation; + /** The schema of the table. */ private TableSchema schema; /** Partitioner to select Kafka partition for each item. */ - protected Optional> partitioner; + protected Optional> partitioner; private String[] partitionKeys; @@ -97,7 +101,9 @@ public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { this.parallelism = parallelism; } - this.flinkKafkaProducer = (FlinkKafkaProducer) new KafkaProducerFactory().createKafkaProducer(kafkaSinkTableInfo, getOutputType().getTypeAt(1), properties, partitioner, partitionKeys); + typeInformation = new CRowTypeInfo(new RowTypeInfo(fieldTypes, fieldNames)); + this.flinkKafkaProducer = (FlinkKafkaProducer) new KafkaProducerFactory() + .createKafkaProducer(kafkaSinkTableInfo, typeInformation, properties, partitioner, partitionKeys); return this; } @@ -108,9 +114,9 @@ public TypeInformation getRecordType() { @Override public void emitDataStream(DataStream> dataStream) { - DataStream mapDataStream = dataStream.filter((Tuple2 record) -> record.f0) - .map((Tuple2 record) -> record.f1) - .returns(getOutputType().getTypeAt(1)) + DataStream mapDataStream = dataStream + .map((Tuple2 record) -> new CRow(record.f1, record.f0)) + .returns(typeInformation) .setParallelism(parallelism); mapDataStream.addSink(flinkKafkaProducer).name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames())); diff --git a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer09.java b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer09.java index c815e134a..bee1865dd 100644 --- a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer09.java +++ b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer09.java @@ -23,6 +23,7 @@ import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer09; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; +import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.types.Row; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,7 +38,7 @@ * * @author maqi */ -public class KafkaProducer09 extends FlinkKafkaProducer09 { +public class KafkaProducer09 extends FlinkKafkaProducer09 { private static final Logger LOG = LoggerFactory.getLogger(KafkaProducer09.class); @@ -45,7 +46,7 @@ public class KafkaProducer09 extends FlinkKafkaProducer09 { private SerializationMetricWrapper serializationMetricWrapper; - public KafkaProducer09(String topicId, SerializationSchema 
serializationSchema, Properties producerConfig, Optional> customPartitioner,String[] partitionKeys) { + public KafkaProducer09(String topicId, SerializationSchema serializationSchema, Properties producerConfig, Optional> customPartitioner, String[] partitionKeys) { super(topicId, new CustomerKeyedSerializationSchema((SerializationMetricWrapper)serializationSchema, partitionKeys), producerConfig, customPartitioner.orElse(null)); this.serializationMetricWrapper = (SerializationMetricWrapper) serializationSchema; } diff --git a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer09Factory.java b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer09Factory.java index 7fb3909ee..ee3423b07 100644 --- a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer09Factory.java +++ b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer09Factory.java @@ -22,6 +22,7 @@ import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; +import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.types.Row; import java.util.Optional; @@ -36,7 +37,7 @@ public class KafkaProducer09Factory extends AbstractKafkaProducerFactory { @Override - public RichSinkFunction createKafkaProducer(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation, Properties properties, Optional> partitioner,String[] partitionKeys) { + public RichSinkFunction createKafkaProducer(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation, Properties properties, Optional> partitioner, String[] partitionKeys) { return new KafkaProducer09(kafkaSinkTableInfo.getTopic(), createSerializationMetricWrapper(kafkaSinkTableInfo, typeInformation), properties, partitioner, partitionKeys); } } diff --git a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index 0a991a8ea..e6dbdf3d3 100644 --- a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -27,10 +27,13 @@ import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.api.java.typeutils.TupleTypeInfo; import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer09; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; import org.apache.flink.table.api.TableSchema; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.runtime.types.CRowTypeInfo; import org.apache.flink.table.sinks.RetractStreamTableSink; import org.apache.flink.table.sinks.TableSink; import org.apache.flink.table.utils.TableConnectorUtils; @@ -56,13 +59,14 @@ public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener< protected Properties properties; - protected FlinkKafkaProducer09 kafkaProducer09; + protected FlinkKafkaProducer09 kafkaProducer09; + protected CRowTypeInfo typeInformation; /** The schema of the table. 
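Note: the emitDataStream rewrite in the kafka 1.x sink above, and the matching rewrites in the 0.9/0.10/0.11 sinks below, share one pattern. A sketch of that method with the generic parameters written out (the fields referenced come from the surrounding KafkaSink class; on the Tuple2 flag, true marks an accumulate message and false a retract message):

```java
// Sketch of the reworked emitDataStream; generics written out for readability.
@Override
public void emitDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    // Previously retract messages (f0 == false) were filtered out; now every record
    // is wrapped into a CRow so the serializer can see the change flag.
    DataStream<CRow> mapDataStream = dataStream
            .map((Tuple2<Boolean, Row> record) -> new CRow(record.f1, record.f0))
            .returns(typeInformation)
            .setParallelism(parallelism);

    mapDataStream
            .addSink(flinkKafkaProducer)
            .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
```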
*/ private TableSchema schema; /** Partitioner to select Kafka partition for each item. */ - protected Optional> partitioner; + protected Optional> partitioner; private String[] partitionKeys; @@ -101,8 +105,9 @@ public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { this.parallelism = parallelism; } - this.kafkaProducer09 = (FlinkKafkaProducer09) new KafkaProducer09Factory() - .createKafkaProducer(kafka09SinkTableInfo, getOutputType().getTypeAt(1), properties, partitioner, partitionKeys); + typeInformation = new CRowTypeInfo(new RowTypeInfo(fieldTypes, fieldNames)); + this.kafkaProducer09 = (FlinkKafkaProducer09) new KafkaProducer09Factory() + .createKafkaProducer(kafka09SinkTableInfo, typeInformation, properties, partitioner, partitionKeys); return this; } @@ -113,10 +118,10 @@ public TypeInformation getRecordType() { @Override public void emitDataStream(DataStream> dataStream) { - DataStream mapDataStream = dataStream.filter((Tuple2 record) -> record.f0) - .map((Tuple2 record) -> record.f1) - .returns(getOutputType().getTypeAt(1)) - .setParallelism(parallelism); + DataStream mapDataStream = dataStream + .map((Tuple2 record) -> new CRow(record.f1, record.f0)) + .returns(typeInformation) + .setParallelism(parallelism); mapDataStream.addSink(kafkaProducer09) .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames())); diff --git a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer010.java b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer010.java index 3cdc45dec..3936575ef 100644 --- a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer010.java +++ b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer010.java @@ -23,7 +23,7 @@ import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; -import org.apache.flink.types.Row; +import org.apache.flink.table.runtime.types.CRow; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,7 +37,7 @@ * * @author maqi */ -public class KafkaProducer010 extends FlinkKafkaProducer010 { +public class KafkaProducer010 extends FlinkKafkaProducer010 { private static final Logger LOG = LoggerFactory.getLogger(KafkaProducer010.class); @@ -45,7 +45,7 @@ public class KafkaProducer010 extends FlinkKafkaProducer010 { private SerializationMetricWrapper serializationMetricWrapper; - public KafkaProducer010(String topicId, SerializationSchema serializationSchema, Properties producerConfig, Optional> customPartitioner, String[] partitionKeys) { + public KafkaProducer010(String topicId, SerializationSchema serializationSchema, Properties producerConfig, Optional> customPartitioner, String[] partitionKeys) { super(topicId, new CustomerKeyedSerializationSchema((SerializationMetricWrapper)serializationSchema, partitionKeys), producerConfig, customPartitioner.get()); this.serializationMetricWrapper = (SerializationMetricWrapper) serializationSchema; } diff --git a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer010Factory.java b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer010Factory.java index c44a9fe86..e0e023586 100644 --- a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer010Factory.java +++ 
b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer010Factory.java @@ -22,6 +22,7 @@ import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; +import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.types.Row; import java.util.Optional; @@ -36,7 +37,7 @@ public class KafkaProducer010Factory extends AbstractKafkaProducerFactory { @Override - public RichSinkFunction createKafkaProducer(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation, Properties properties, Optional> partitioner, String[] partitionKeys) { + public RichSinkFunction createKafkaProducer(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation, Properties properties, Optional> partitioner, String[] partitionKeys) { return new KafkaProducer010(kafkaSinkTableInfo.getTopic(), createSerializationMetricWrapper(kafkaSinkTableInfo, typeInformation), properties, partitioner, partitionKeys); } } diff --git a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index 34ea8fc5f..ac5a11810 100644 --- a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -29,6 +29,8 @@ import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; import org.apache.flink.table.api.TableSchema; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.runtime.types.CRowTypeInfo; import org.apache.flink.table.sinks.RetractStreamTableSink; import org.apache.flink.table.sinks.TableSink; import org.apache.flink.table.utils.TableConnectorUtils; @@ -61,6 +63,9 @@ public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener< protected KafkaSinkTableInfo kafka10SinkTableInfo; + protected RichSinkFunction kafkaProducer010; + protected CRowTypeInfo typeInformation; + /** The schema of the table. 
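The kafka10 sink is wired slightly differently: the producer is now built once in genStreamSink (hunk just below) instead of inside emitDataStream, using the project's CustomerFlinkPartition as the FlinkKafkaPartitioner. A sketch of that construction with the generic parameters written out (fields and types come from the surrounding class):

```java
// Sketch of the new producer wiring in the kafka10 KafkaSink#genStreamSink hunk below.
typeInformation = new CRowTypeInfo(new RowTypeInfo(fieldTypes, fieldNames));

kafkaProducer010 = new KafkaProducer010Factory().createKafkaProducer(
        kafka10SinkTableInfo,
        typeInformation,
        properties,
        Optional.of(new CustomerFlinkPartition<>()),   // project partitioner, now typed to CRow
        partitionKeys);
```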
*/ private TableSchema schema; @@ -97,6 +102,12 @@ public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { if (parallelism != null) { this.parallelism = parallelism; } + + typeInformation = new CRowTypeInfo(new RowTypeInfo(fieldTypes, fieldNames)); + kafkaProducer010 = new KafkaProducer010Factory().createKafkaProducer(kafka10SinkTableInfo, + typeInformation, properties, + Optional.of(new CustomerFlinkPartition<>()), partitionKeys); + return this; } @@ -107,13 +118,9 @@ public TypeInformation getRecordType() { @Override public void emitDataStream(DataStream> dataStream) { - - RichSinkFunction kafkaProducer010 = new KafkaProducer010Factory().createKafkaProducer(kafka10SinkTableInfo, getOutputType().getTypeAt(1), properties, - Optional.of(new CustomerFlinkPartition<>()), partitionKeys); - - DataStream mapDataStream = dataStream.filter((Tuple2 record) -> record.f0) - .map((Tuple2 record) -> record.f1) - .returns(getOutputType().getTypeAt(1)) + DataStream mapDataStream = dataStream + .map((Tuple2 record) -> new CRow(record.f1, record.f0)) + .returns(typeInformation) .setParallelism(parallelism); mapDataStream.addSink(kafkaProducer010).name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames())); diff --git a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer011.java b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer011.java index 7880dd419..429d21a79 100644 --- a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer011.java +++ b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer011.java @@ -24,7 +24,7 @@ import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; -import org.apache.flink.types.Row; +import org.apache.flink.table.runtime.types.CRow; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +38,7 @@ * * @author maqi */ -public class KafkaProducer011 extends FlinkKafkaProducer011 { +public class KafkaProducer011 extends FlinkKafkaProducer011 { private static final Logger LOG = LoggerFactory.getLogger(KafkaProducer011.class); @@ -46,7 +46,7 @@ public class KafkaProducer011 extends FlinkKafkaProducer011 { private SerializationMetricWrapper serializationMetricWrapper; - public KafkaProducer011(String topicId, SerializationSchema serializationSchema, Properties producerConfig, Optional> customPartitioner, String[] partitionKeys) { + public KafkaProducer011(String topicId, SerializationSchema serializationSchema, Properties producerConfig, Optional> customPartitioner, String[] partitionKeys) { super(topicId, new CustomerKeyedSerializationSchema((SerializationMetricWrapper)serializationSchema, partitionKeys), producerConfig, customPartitioner); this.serializationMetricWrapper = (SerializationMetricWrapper) serializationSchema; } diff --git a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer011Factory.java b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer011Factory.java index e2272b16e..0cb11da82 100644 --- a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer011Factory.java +++ b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaProducer011Factory.java @@ -22,6 +22,7 @@ import org.apache.flink.api.common.typeinfo.TypeInformation; import 
org.apache.flink.streaming.api.functions.sink.RichSinkFunction; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; +import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.types.Row; import java.util.Optional; @@ -36,7 +37,7 @@ public class KafkaProducer011Factory extends AbstractKafkaProducerFactory { @Override - public RichSinkFunction createKafkaProducer(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation, Properties properties, Optional> partitioner, + public RichSinkFunction createKafkaProducer(KafkaSinkTableInfo kafkaSinkTableInfo, TypeInformation typeInformation, Properties properties, Optional> partitioner, String[] partitionKeys) { return new KafkaProducer011(kafkaSinkTableInfo.getTopic(), createSerializationMetricWrapper(kafkaSinkTableInfo, typeInformation), properties, partitioner, partitionKeys); } diff --git a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index d7807a935..835941ca3 100644 --- a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -31,6 +31,8 @@ import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; import org.apache.flink.table.api.TableSchema; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.runtime.types.CRowTypeInfo; import org.apache.flink.table.sinks.RetractStreamTableSink; import org.apache.flink.table.sinks.TableSink; import org.apache.flink.table.utils.TableConnectorUtils; @@ -61,13 +63,15 @@ public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener protected Properties properties; - protected FlinkKafkaProducer011 kafkaProducer011; + protected FlinkKafkaProducer011 kafkaProducer011; + protected CRowTypeInfo typeInformation; + /** The schema of the table. */ private TableSchema schema; /** Partitioner to select Kafka partition for each item. 
*/ - protected Optional> partitioner; + protected Optional> partitioner; private String[] partitionKeys; @@ -102,8 +106,9 @@ public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { this.parallelism = parallelism; } - this.kafkaProducer011 = (FlinkKafkaProducer011) new KafkaProducer011Factory() - .createKafkaProducer(kafka11SinkTableInfo, getOutputType().getTypeAt(1), properties, partitioner, partitionKeys); + typeInformation = new CRowTypeInfo(new RowTypeInfo(fieldTypes, fieldNames)); + this.kafkaProducer011 = (FlinkKafkaProducer011) new KafkaProducer011Factory() + .createKafkaProducer(kafka11SinkTableInfo, typeInformation, properties, partitioner, partitionKeys); return this; } @@ -114,9 +119,10 @@ public TypeInformation getRecordType() { @Override public void emitDataStream(DataStream> dataStream) { - DataStream mapDataStream = dataStream.filter((Tuple2 record) -> record.f0) - .map((Tuple2 record) -> record.f1) - .returns(getOutputType().getTypeAt(1)) + + DataStream mapDataStream = dataStream + .map((Tuple2 record) -> new CRow(record.f1, record.f0)) + .returns(typeInformation) .setParallelism(parallelism); mapDataStream.addSink(kafkaProducer011).name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames())); diff --git a/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaConsumer011Factory.java b/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaConsumer011Factory.java index 3f804fc6c..c20f0678b 100644 --- a/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaConsumer011Factory.java +++ b/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaConsumer011Factory.java @@ -31,8 +31,8 @@ /** * company: www.dtstack.com - * author: toutian * create: 2019/12/24 + * @author: toutian */ public class KafkaConsumer011Factory extends AbstractKafkaConsumerFactory { diff --git a/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java index 11be1898a..2f760bdf2 100644 --- a/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java +++ b/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java @@ -93,11 +93,11 @@ public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExec //earliest,latest if ("earliest".equalsIgnoreCase(kafkaSourceTableInfo.getOffsetReset())) { kafkaSrc.setStartFromEarliest(); - } else if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) {// {"0":12312,"1":12321,"2":12312} + } else if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) { try { Properties properties = PluginUtil.jsonStrToObject(kafkaSourceTableInfo.getOffsetReset(), Properties.class); Map offsetMap = PluginUtil.objectToMap(properties); - Map specificStartupOffsets = new HashMap<>(); + Map specificStartupOffsets = new HashMap<>(16); for (Map.Entry entry : offsetMap.entrySet()) { specificStartupOffsets.put(new KafkaTopicPartition(topicName, Integer.valueOf(entry.getKey())), Long.valueOf(entry.getValue().toString())); } From 1cb21db06ca2dce2bdc9cb0e53a984647151a692 Mon Sep 17 00:00:00 2001 From: maqi Date: Wed, 18 Mar 2020 16:35:32 +0800 Subject: [PATCH 32/47] kafka avro and csv ,retract sink --- README.md | 24 +- docs/kafkaSink.md | 223 ++++++++++++++++++ docs/kafkaSource.md | 184 +-------------- .../kafka/AbstractKafkaProducerFactory.java | 5 +- 
.../AvroCRowSerializationSchema.java | 44 +++- .../CsvCRowSerializationSchema.java | 39 ++- .../sql/sink/kafka/table/KafkaSinkParser.java | 3 + .../sink/kafka/table/KafkaSinkTableInfo.java | 28 +++ 8 files changed, 324 insertions(+), 226 deletions(-) create mode 100644 docs/kafkaSink.md diff --git a/README.md b/README.md index ccf94f1ff..c84d77d5a 100644 --- a/README.md +++ b/README.md @@ -7,34 +7,11 @@ > > * 支持原生FLinkSQL所有的语法 > > * 扩展了输入和输出的性能指标到promethus - ## 新特性: - * 1.kafka源表支持not null语法,支持字符串类型的时间转换。 - * 2.rdb维表与DB建立连接时,周期进行连接,防止连接断开。rdbsink写入时,对连接进行检查。 - * 3.异步维表支持非等值连接,比如:<>,<,>。 - * 4.增加kafka数组解析 - * 5.增加kafka1.0以上版本的支持 - * 6.增加postgresql、kudu、clickhouse维表、结果表的支持 - * 7.支持插件的依赖方式,参考pluginLoadMode参数 - * 8.支持cep处理 - * 9.支持udaf - * 10.支持谓词下移 - * 11.支持状态的ttl - - ## BUG修复: - * 1.修复不能解析sql中orderby,union语法。 - * 2.修复yarnPer模式提交失败的异常。 - * 3.一些bug的修复 - # 已支持 * 源表:kafka 0.9、0.10、0.11、1.x版本 * 维表:mysql, SQlServer,oracle, hbase, mongo, redis, cassandra, serversocket, kudu, postgresql, clickhouse, impala, db2, sqlserver * 结果表:mysql, SQlServer, oracle, hbase, elasticsearch5.x, mongo, redis, cassandra, console, kudu, postgresql, clickhouse, impala, db2, sqlserver -# 后续开发计划 - * 维表快照 - * kafka avro格式 - * topN - ## 1 快速起步 ### 1.1 运行模式 @@ -205,6 +182,7 @@ sh submit.sh -sql D:\sideSql.txt -name xctest -remoteSqlPluginPath /opt/dtstack * [impala 结果表插件](docs/impalaSink.md) * [db2 结果表插件](docs/db2Sink.md) * [sqlserver 结果表插件](docs/sqlserverSink.md) +* [kafka 结果表插件](docs/kafkaSink.md) ### 2.3 维表插件 * [hbase 维表插件](docs/hbaseSide.md) diff --git a/docs/kafkaSink.md b/docs/kafkaSink.md new file mode 100644 index 000000000..3c6eb1dc6 --- /dev/null +++ b/docs/kafkaSink.md @@ -0,0 +1,223 @@ +## 1.格式: +``` +CREATE TABLE tableName( + colName colType, + ... + function(colNameX) AS aliasName, + WATERMARK FOR colName AS withOffset( colName , delayTime ) + )WITH( + type ='kafka11', + bootstrapServers ='ip:port,ip:port...', + zookeeperQuorum ='ip:port,ip:port/zkparent', + offsetReset ='latest', + topic ='topicName', + groupId='test', + parallelism ='parllNum', + ); +``` + +## 2.支持的版本 + kafka09,kafka10,kafka11及以上版本 + **kafka读取和写入的版本必须一致,否则会有兼容性错误。** + +## 3.表结构定义 + +|参数名称|含义| +|----|---| +| tableName | 在 sql 中使用的名称;即注册到flink-table-env上的名称| +| colName | 列名称| +| colType | 列类型 [colType支持的类型](colType.md)| + +## 4.参数: + +|参数名称|含义|是否必填|默认值| +|----|---|---|---| +|type | kafka09 | 是|kafka09、kafka10、kafka11、kafka(对应kafka1.0及以上版本)| +|groupId | 需要读取的 groupId 名称|否|| +|bootstrapServers | kafka bootstrap-server 地址信息(多个用逗号隔开)|是|| +|zookeeperQuorum | kafka zk地址信息(多个之间用逗号分隔)|是|| +|topic | 需要读取的 topic 名称|是|| +|parallelism | 并行度设置|否|1| +|partitionKeys | 用来分区的字段|否|| +|updateMode | 回溯流数据下发模式,append,upsert.upsert模式下会将是否为回溯信息以字段形式进行下发。|否|append| +|sinkdatatype | 写入kafka数据格式,json,avro,csv|否|json| +|fieldDelimiter | csv数据分隔符|否| \ | + + +**kafka相关参数可以自定义,使用kafka.开头即可。** +``` +kafka.consumer.id +kafka.socket.timeout.ms +kafka.fetch.message.max.bytes +kafka.num.consumer.fetchers +kafka.auto.commit.enable +kafka.auto.commit.interval.ms +kafka.queued.max.message.chunks +kafka.rebalance.max.retries +kafka.fetch.min.bytes +kafka.fetch.wait.max.ms +kafka.rebalance.backoff.ms +kafka.refresh.leader.backoff.ms +kafka.consumer.timeout.ms +kafka.exclude.internal.topics +kafka.partition.assignment.strategy +kafka.client.id +kafka.zookeeper.session.timeout.ms +kafka.zookeeper.connection.timeout.ms +kafka.zookeeper.sync.time.ms +kafka.offsets.storage +kafka.offsets.channel.backoff.ms +kafka.offsets.channel.socket.timeout.ms +kafka.offsets.commit.max.retries 
+kafka.dual.commit.enabled +kafka.partition.assignment.strategy +kafka.socket.receive.buffer.bytes +kafka.fetch.min.bytes +``` + +## 5.样例: + +### json格式: +``` +CREATE TABLE MyResult( + channel varchar, + pv varchar + )WITH( + type='kafka', + bootstrapServers='172.16.8.107:9092', + topic='mqTest02', + parallelism ='2', + partitionKeys = 'channel,pv', + updateMode='upsert' + ); + +upsert模式下发的数据格式:{"channel":"zs","pv":"330",retract:true} +append模式下发的数据格式:{"channel":"zs","pv":"330"} + +``` + +### avro格式: + +如果updateMode='upsert',schemaInfo需要包含retract属性信息。 + +``` +CREATE TABLE MyTable( + channel varchar, + pv varchar + --xctime bigint + )WITH( + type='kafka', + bootstrapServers='172.16.8.107:9092', + groupId='mqTest01', + offsetReset='latest', + topic='mqTest01', + parallelism ='1', + topicIsPattern ='false' + ); + +create table sideTable( + channel varchar, + xccount int, + PRIMARY KEY(channel), + PERIOD FOR SYSTEM_TIME + )WITH( + type='mysql', + url='jdbc:mysql://172.16.8.109:3306/test?charset=utf8', + userName='dtstack', + password='abc123', + tableName='sidetest', + cache = 'LRU', + cacheTTLMs='10000', + parallelism ='1' + + ); + + +CREATE TABLE MyResult( + channel varchar, + pv varchar + )WITH( + --type='console' + type='kafka', + bootstrapServers='172.16.8.107:9092', + topic='mqTest02', + parallelism ='1', + updateMode='upsert', + sinkdatatype = 'avro', + schemaInfo = '{"type":"record","name":"MyResult","fields":[{"name":"channel","type":"string"} + ,{"name":"pv","type":"string"},{"name":"channel","type":"string"}, + {"name":"retract","type":"boolean"}]}' + + ); + + +insert +into + MyResult + select + a.channel as channel, + a.pv as pv + from + MyTable a +``` +### csv格式: + +``` +CREATE TABLE MyTable( + channel varchar, + pv varchar + --xctime bigint + )WITH( + type='kafka', + bootstrapServers='172.16.8.107:9092', + groupId='mqTest01', + offsetReset='latest', + topic='mqTest01', + parallelism ='2', + topicIsPattern ='false' + ); + +create table sideTable( + channel varchar, + xccount int, + PRIMARY KEY(channel), + PERIOD FOR SYSTEM_TIME + )WITH( + type='mysql', + url='jdbc:mysql://172.16.8.109:3306/test?charset=utf8', + userName='dtstack', + password='abc123', + tableName='sidetest', + cache = 'LRU', + cacheTTLMs='10000', + parallelism ='1' + + ); + + +CREATE TABLE MyResult( + channel varchar, + pv varchar + )WITH( + type='kafka', + bootstrapServers='172.16.8.107:9092', + topic='mqTest02', + parallelism ='2', + updateMode='upsert', + sinkdatatype = 'csv', + fieldDelimiter='*' + + + + ); + + +insert +into + MyResult + select + a.channel as channel, + a.pv as pv + from + MyTable a +``` diff --git a/docs/kafkaSource.md b/docs/kafkaSource.md index 76096aba6..9b488863e 100644 --- a/docs/kafkaSource.md +++ b/docs/kafkaSource.md @@ -1,14 +1,12 @@ ## 1.格式: ``` -数据现在支持json格式{"xx":"bb","cc":"dd"} - CREATE TABLE tableName( colName colType, ... 
function(colNameX) AS aliasName, WATERMARK FOR colName AS withOffset( colName , delayTime ) )WITH( - type ='kafka09', + type ='kafka11', bootstrapServers ='ip:port,ip:port...', zookeeperQuorum ='ip:port,ip:port/zkparent', offsetReset ='latest', @@ -89,7 +87,7 @@ CREATE TABLE MyTable( xctime bigint, CHARACTER_LENGTH(channel) AS timeLeng )WITH( - type ='kafka09', + type ='kafka11', bootstrapServers ='172.16.8.198:9092', zookeeperQuorum ='172.16.8.198:2181/kafka', offsetReset ='latest', @@ -208,181 +206,3 @@ CREATE TABLE MyTable( lengthcheckpolicy = 'PAD' ); ``` -# 三、text格式数据源UDF自定义拆分 -Kafka源表数据解析流程:Kafka Source Table -> UDTF ->Realtime Compute -> SINK。从Kakfa读入的数据,都是VARBINARY(二进制)格式,对读入的每条数据,都需要用UDTF将其解析成格式化数据。 - 与其他格式不同,本格式定义DDL必须与以下SQL一摸一样,表中的五个字段顺序务必保持一致: - -## 1. 定义源表,注意:kafka源表DDL字段必须与以下例子一模一样。WITH中参数可改。 -``` -create table kafka_stream( - _topic STRING, - _messageKey STRING, - _message STRING, - _partition INT, - _offset BIGINT, -) with ( - type ='kafka09', - bootstrapServers ='172.16.8.198:9092', - zookeeperQuorum ='172.16.8.198:2181/kafka', - offsetReset ='latest', - topic ='nbTest1', - parallelism ='1', - sourcedatatype='text' - ) -``` -## 2.参数: - -|参数名称|含义|是否必填|默认值| -|----|---|---|---| -|type | kafka09 | 是|| -|bootstrapServers | kafka bootstrap-server 地址信息(多个用逗号隔开)|是|| -|zookeeperQuorum | kafka zk地址信息(多个之间用逗号分隔)|是|| -|topic | 需要读取的 topic 名称|是|| -|offsetReset | 读取的topic 的offset初始位置[latest|earliest]|否|latest| -|parallelism | 并行度设置|否|1| -|sourcedatatype | 数据类型|否|text| -**kafka相关参数可以自定义,使用kafka.开头即可。** - -## 2.自定义: -从kafka读出的数据,需要进行窗口计算。 按照实时计算目前的设计,滚窗/滑窗等窗口操作,需要(且必须)在源表DDL上定义Watermark。Kafka源表比较特殊。如果要以kafka中message字段中的的Event Time进行窗口操作, -需要先从message字段,使用UDX解析出event time,才能定义watermark。 在kafka源表场景中,需要使用计算列。 假设,kafka中写入的数据如下: -2018-11-11 00:00:00|1|Anna|female整个计算流程为:Kafka SOURCE->UDTF->Realtime Compute->RDS SINK(单一分隔符可直接使用类csv格式模板,自定义适用于更复杂的数据类型,本说明只做参考) - -**SQL** -``` --- 定义解析Kakfa message的UDTF - CREATE FUNCTION kafkapaser AS 'com.XXXX.kafkaUDTF'; - CREATE FUNCTION kafkaUDF AS 'com.XXXX.kafkaUDF'; - -- 定义源表,注意:kafka源表DDL字段必须与以下例子一模一样。WITH中参数可改。 - create table kafka_src ( - _topic STRING, - _messageKey STRING, - _message STRING, - _partition INT, - _offset BIGINT, - ctime AS TO_TIMESTAMP(kafkaUDF(_message)), -- 定义计算列,计算列可理解为占位符,源表中并没有这一列,其中的数据可经过下游计算得出。注意计算里的类型必须为timestamp才能在做watermark。 - watermark for ctime as withoffset(ctime,0) -- 在计算列上定义watermark - ) WITH ( - type = 'kafka010', -- Kafka Source类型,与Kafka版本强相关,目前支持的Kafka版本请参考本文档 - topic = 'test_kafka_topic', - ... 
- ); - create table rds_sink ( - name VARCHAR, - age INT, - grade VARCHAR, - updateTime TIMESTAMP - ) WITH( - type='mysql', - url='jdbc:mysql://localhost:3306/test', - tableName='test4', - userName='test', - password='XXXXXX' - ); - -- 使用UDTF,将二进制数据解析成格式化数据 - CREATE VIEW input_view ( - name, - age, - grade, - updateTime - ) AS - SELECT - COUNT(*) as cnt, - T.ctime, - T.order, - T.name, - T.sex - from - kafka_src as S, - LATERAL TABLE (kafkapaser _message)) as T ( - ctime, - order, - name, - sex - ) - Group BY T.sex, - TUMBLE(ctime, INTERVAL '1' MINUTE); - -- 对input_view中输出的数据做计算 - CREATE VIEW view2 ( - cnt, - sex - ) AS - SELECT - COUNT(*) as cnt, - T.sex - from - input_view - Group BY sex, TUMBLE(ctime, INTERVAL '1' MINUTE); - -- 使用解析出的格式化数据进行计算,并将结果输出到RDS中 - insert into rds_sink - SELECT - cnt,sex - from view2; - ``` -**UDF&UDTF** -``` -package com.XXXX; - import com.XXXX.fastjson.JSONObject; - import org.apache.flink.table.functions.TableFunction; - import org.apache.flink.table.types.DataType; - import org.apache.flink.table.types.DataTypes; - import org.apache.flink.types.Row; - import java.io.UnsupportedEncodingException; - /** - 以下例子解析输入Kafka中的JSON字符串,并将其格式化输出 - **/ - public class kafkaUDTF extends TableFunction { - public void eval(byte[] message) { - try { - // 读入一个二进制数据,并将其转换为String格式 - String msg = new String(message, "UTF-8"); - // 提取JSON Object中各字段 - String ctime = Timestamp.valueOf(data.split('\\|')[0]); - String order = data.split('\\|')[1]; - String name = data.split('\\|')[2]; - String sex = data.split('\\|')[3]; - // 将解析出的字段放到要输出的Row()对象 - Row row = new Row(4); - row.setField(0, ctime); - row.setField(1, age); - row.setField(2, grade); - row.setField(3, updateTime); - System.out.println("Kafka message str ==>" + row.toString()); - // 输出一行 - collect(row); - } catch (ClassCastException e) { - System.out.println("Input data format error. 
Input data " + msg + "is not json string"); - } - } catch (UnsupportedEncodingException e) { - e.printStackTrace(); - } - } - @Override - // 如果返回值是Row,就必须重载实现这个方法,显式地告诉系统返回的字段类型 - // 定义输出Row()对象的字段类型 - public DataType getResultType(Object[] arguments, Class[] argTypes) { - return DataTypes.createRowType(DataTypes.TIMESTAMP,DataTypes.STRING, DataTypes.Integer, DataTypes.STRING,DataTypes.STRING); - } - } - - package com.dp58; - package com.dp58.sql.udx; - import org.apache.flink.table.functions.FunctionContext; - import org.apache.flink.table.functions.ScalarFunction; - public class KafkaUDF extends ScalarFunction { - // 可选,open方法可以不写 - // 需要import org.apache.flink.table.functions.FunctionContext; - public String eval(byte[] message) { - // 读入一个二进制数据,并将其转换为String格式 - String msg = new String(message, "UTF-8"); - return msg.split('\\|')[0]; - } - public long eval(String b, String c) { - return eval(b) + eval(c); - } - //可选,close方法可以不写 - @Override - public void close() { - } - } - ``` diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java index ebd313b29..9dcaf222b 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java @@ -75,15 +75,16 @@ private SerializationSchema createSerializationSchema(KafkaSinkTableInfo k if (StringUtils.isBlank(kafkaSinkTableInfo.getFieldDelimiter())) { throw new IllegalArgumentException("sinkDataType:" + FormatType.CSV.name() + " must set fieldDelimiter"); } - final CsvCRowSerializationSchema.Builder serSchemaBuilder = new CsvCRowSerializationSchema.Builder(typeInformation); serSchemaBuilder.setFieldDelimiter(kafkaSinkTableInfo.getFieldDelimiter().toCharArray()[0]); + serSchemaBuilder.setUpdateMode(kafkaSinkTableInfo.getUpdateMode()); + serializationSchema = serSchemaBuilder.build(); } else if (FormatType.AVRO.name().equalsIgnoreCase(kafkaSinkTableInfo.getSinkDataType())) { if (StringUtils.isBlank(kafkaSinkTableInfo.getSchemaString())) { throw new IllegalArgumentException("sinkDataType:" + FormatType.AVRO.name() + " must set schemaString"); } - serializationSchema = new AvroCRowSerializationSchema(kafkaSinkTableInfo.getSchemaString(),kafkaSinkTableInfo.getUpdateMode()); + serializationSchema = new AvroCRowSerializationSchema(kafkaSinkTableInfo.getSchemaString(), kafkaSinkTableInfo.getUpdateMode()); } if (null == serializationSchema) { diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/AvroCRowSerializationSchema.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/AvroCRowSerializationSchema.java index 34fa22c99..692e208b5 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/AvroCRowSerializationSchema.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/AvroCRowSerializationSchema.java @@ -36,8 +36,6 @@ import org.apache.avro.util.Utf8; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.serialization.SerializationSchema; -import org.apache.flink.formats.avro.AvroRowDeserializationSchema; -import org.apache.flink.formats.avro.typeutils.AvroSchemaConverter; import org.apache.flink.table.runtime.types.CRow; 
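Alongside the AvroCRowSerializationSchema changes that follow, a minimal usage sketch of the Avro branch selected in AbstractKafkaProducerFactory above. The schema string, field names and values are illustrative; for an upsert stream the declared Avro schema has to contain a boolean retract field, which the serializer fills from the CRow change flag:

```java
// Illustrative sketch only; schema and values are made up.
import com.dtstack.flink.sql.enums.EUpdateMode;
import com.dtstack.flink.sql.sink.kafka.serialization.AvroCRowSerializationSchema;
import org.apache.flink.table.runtime.types.CRow;
import org.apache.flink.types.Row;

public class AvroCRowSerializationSchemaSketch {
    public static void main(String[] args) {
        String schemaInfo = "{\"type\":\"record\",\"name\":\"MyResult\",\"fields\":["
                + "{\"name\":\"channel\",\"type\":\"string\"},"
                + "{\"name\":\"pv\",\"type\":\"string\"},"
                + "{\"name\":\"retract\",\"type\":\"boolean\"}]}";

        AvroCRowSerializationSchema schema =
                new AvroCRowSerializationSchema(schemaInfo, EUpdateMode.UPSERT.name());

        // The Row carries only the data fields; in upsert mode the serializer fills
        // the "retract" field of the record from CRow.change().
        byte[] accumulate = schema.serialize(new CRow(Row.of("zs", "330"), true));
        byte[] retract = schema.serialize(new CRow(Row.of("zs", "330"), false));
        System.out.println(accumulate.length + " / " + retract.length);
    }
}
```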
import org.apache.flink.types.Row; import org.apache.flink.util.Preconditions; @@ -55,17 +53,16 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.TimeZone; +import java.util.stream.Collectors; /** * Serialization schema that serializes CROW into Avro bytes. * *
<p>
Serializes objects that are represented in (nested) Flink rows. It support types that * are compatible with Flink's Table & SQL API. - * - *
<p>
Note: Changes in this class need to be kept in sync with the corresponding runtime - * class {@link AvroRowDeserializationSchema} and schema converter {@link AvroSchemaConverter}. - * + ** * @author maqi */ public class AvroCRowSerializationSchema implements SerializationSchema { @@ -107,14 +104,14 @@ public class AvroCRowSerializationSchema implements SerializationSchema { private String updateMode; - private final String retractKey = "retract"; + private String retractKey = "retract"; /** * Creates an Avro serialization schema for the given specific record class. * * @param recordClazz Avro record class used to serialize Flink's row to Avro's record */ - public AvroCRowSerializationSchema(Class recordClazz) { + public AvroCRowSerializationSchema(Class recordClazz, String updateMode) { Preconditions.checkNotNull(recordClazz, "Avro record class must not be null."); this.recordClazz = recordClazz; this.schema = SpecificData.get().getSchema(recordClazz); @@ -122,6 +119,7 @@ public AvroCRowSerializationSchema(Class recordClazz) this.datumWriter = new SpecificDatumWriter<>(schema); this.arrayOutputStream = new ByteArrayOutputStream(); this.encoder = EncoderFactory.get().binaryEncoder(arrayOutputStream, null); + this.updateMode = updateMode; } /** @@ -152,10 +150,10 @@ public byte[] serialize(CRow crow) { // convert to record final GenericRecord record = convertRowToAvroRecord(schema, row); + + dealRetractField(change, record); + arrayOutputStream.reset(); - if (StringUtils.equalsIgnoreCase(updateMode, EUpdateMode.UPSERT.name())) { - record.put(retractKey, change); - } datumWriter.write(record, encoder); encoder.flush(); return arrayOutputStream.toByteArray(); @@ -164,6 +162,18 @@ public byte[] serialize(CRow crow) { } } + protected void dealRetractField(boolean change, GenericRecord record) { + schema.getFields() + .stream() + .filter(field -> StringUtils.equalsIgnoreCase(field.name(), retractKey)) + .findFirst() + .ifPresent(field -> { + if (StringUtils.equalsIgnoreCase(updateMode, EUpdateMode.UPSERT.name())) { + record.put(retractKey, convertFlinkType(field.schema(), change)); + } + }); + } + @Override public boolean equals(Object o) { if (this == o) { @@ -184,7 +194,12 @@ public int hashCode() { // -------------------------------------------------------------------------------------------- private GenericRecord convertRowToAvroRecord(Schema schema, Row row) { - final List fields = schema.getFields(); + + final List fields = schema.getFields() + .stream() + .filter(field -> !StringUtils.equalsIgnoreCase(field.name(), retractKey)) + .collect(Collectors.toList()); + final int length = fields.size(); final GenericRecord record = new GenericData.Record(schema); for (int i = 0; i < length; i++) { @@ -328,6 +343,8 @@ private long convertFromTimestamp(Schema schema, Timestamp date) { private void writeObject(ObjectOutputStream outputStream) throws IOException { outputStream.writeObject(recordClazz); outputStream.writeObject(schemaString); // support for null + outputStream.writeObject(retractKey); + outputStream.writeObject(updateMode); } @SuppressWarnings("unchecked") @@ -339,6 +356,9 @@ private void readObject(ObjectInputStream inputStream) throws ClassNotFoundExcep } else { schema = new Schema.Parser().parse(schemaString); } + retractKey = (String) inputStream.readObject(); + updateMode = (String) inputStream.readObject(); + datumWriter = new SpecificDatumWriter<>(schema); arrayOutputStream = new ByteArrayOutputStream(); encoder = EncoderFactory.get().binaryEncoder(arrayOutputStream, null); diff 
--git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/CsvCRowSerializationSchema.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/CsvCRowSerializationSchema.java index 903395f9d..4e57b6f2a 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/CsvCRowSerializationSchema.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/serialization/CsvCRowSerializationSchema.java @@ -47,7 +47,10 @@ import java.math.BigDecimal; import java.math.BigInteger; import java.util.Arrays; +import java.util.Iterator; import java.util.Objects; +import java.util.stream.IntStream; +import java.util.stream.Stream; /** * Serialization schema that serializes an object of Flink types into a CSV bytes. @@ -72,25 +75,27 @@ public final class CsvCRowSerializationSchema implements SerializationSchema kafkaParam = new HashMap(); @@ -119,9 +129,27 @@ public boolean check() { Preconditions.checkNotNull(getType(), "kafka of type is required"); Preconditions.checkNotNull(bootstrapServers, "kafka of bootstrapServers is required"); Preconditions.checkNotNull(topic, "kafka of topic is required"); + + if (StringUtils.equalsIgnoreCase(getSinkDataType(), FormatType.AVRO.name())) { + avroParamCheck(); + } + return false; } + public void avroParamCheck() { + Preconditions.checkNotNull(schemaString, "avro type schemaInfo is required"); + if (StringUtils.equalsIgnoreCase(updateMode, EUpdateMode.UPSERT.name())) { + Schema schema = new Schema.Parser().parse(schemaString); + schema.getFields() + .stream() + .filter(field -> StringUtils.equalsIgnoreCase(field.name(), RETRACT_FIELD_KEY)) + .findFirst() + .orElseThrow(() -> + new NullPointerException(String.valueOf("arvo upsert mode the retract attribute must be contained in schemaInfo field "))); + } + } + public String getEnableKeyPartition() { return enableKeyPartition; } From d124305ffe07b66f8eee430b3c0ac1072281dc14 Mon Sep 17 00:00:00 2001 From: maqi Date: Thu, 19 Mar 2020 18:37:48 +0800 Subject: [PATCH 33/47] add jacoco --- pom.xml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pom.xml b/pom.xml index 5493645e9..de963240c 100644 --- a/pom.xml +++ b/pom.xml @@ -54,6 +54,20 @@ + + org.jacoco + jacoco-maven-plugin + 0.7.8 + + + + prepare-agent + report + + + + + pl.project13.maven git-commit-id-plugin From baabfd45555949eb8ead829da31e6d1c492dfbbd Mon Sep 17 00:00:00 2001 From: maqi Date: Fri, 20 Mar 2020 16:41:31 +0800 Subject: [PATCH 34/47] extract abstractKafkaSource --- .../dtstack/flink/sql/util/DtStringUtil.java | 8 +- .../sql/source/kafka/AbstractKafkaSource.java | 117 ++++++++++++ .../sql/source/kafka/enums/EKafkaOffset.java | 31 ++++ .../source/kafka/table/KafkaSourceParser.java | 29 +-- .../kafka/table/KafkaSourceTableInfo.java | 175 ++++++++---------- .../flink/sql/source/kafka/KafkaSource.java | 74 +------- .../flink/sql/source/kafka/KafkaSource.java | 72 +------ .../flink/sql/source/kafka/KafkaSource.java | 87 ++------- .../flink/sql/source/kafka/KafkaSource.java | 90 ++------- 9 files changed, 292 insertions(+), 391 deletions(-) create mode 100644 kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/AbstractKafkaSource.java create mode 100644 kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/enums/EKafkaOffset.java diff --git a/core/src/main/java/com/dtstack/flink/sql/util/DtStringUtil.java 
b/core/src/main/java/com/dtstack/flink/sql/util/DtStringUtil.java index b2486b1ab..10a6b4f63 100644 --- a/core/src/main/java/com/dtstack/flink/sql/util/DtStringUtil.java +++ b/core/src/main/java/com/dtstack/flink/sql/util/DtStringUtil.java @@ -219,14 +219,14 @@ public static String addJdbcParam(String dbUrl, Map addParams, b return preStr + "?" + sb.toString(); } - public static boolean isJosn(String str){ + public static boolean isJson(String str) { boolean flag = false; - if(StringUtils.isNotBlank(str)){ + if (StringUtils.isNotBlank(str)) { try { - objectMapper.readValue(str,Map.class); + objectMapper.readValue(str, Map.class); flag = true; } catch (Throwable e) { - flag=false; + flag = false; } } return flag; diff --git a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/AbstractKafkaSource.java b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/AbstractKafkaSource.java new file mode 100644 index 000000000..852a381e2 --- /dev/null +++ b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/AbstractKafkaSource.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.dtstack.flink.sql.source.kafka; + +import com.dtstack.flink.sql.source.IStreamSourceGener; +import com.dtstack.flink.sql.source.kafka.enums.EKafkaOffset; +import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo; +import com.dtstack.flink.sql.util.DtStringUtil; +import com.dtstack.flink.sql.util.PluginUtil; +import org.apache.commons.lang3.StringUtils; +import org.apache.flink.api.common.typeinfo.TypeInformation; +import org.apache.flink.api.java.typeutils.RowTypeInfo; +import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase; +import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition; +import org.apache.flink.table.api.Table; +import org.apache.flink.types.Row; +import org.apache.kafka.clients.consumer.ConsumerConfig; + +import java.util.Map; +import java.util.Properties; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +/** + * Date: 2020/3/20 + * Company: www.dtstack.com + * @author maqi + */ +public abstract class AbstractKafkaSource implements IStreamSourceGener
<Table>
{ + + private static final String SOURCE_OPERATOR_NAME_TPL = "${topic}_${table}"; + + protected Properties getKafkaProperties(KafkaSourceTableInfo kafkaSourceTableInfo) { + Properties props = new Properties(); + props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaSourceTableInfo.getBootstrapServers()); + + if (DtStringUtil.isJson(kafkaSourceTableInfo.getOffsetReset())) { + props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, EKafkaOffset.NONE.name().toLowerCase()); + } else { + props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, kafkaSourceTableInfo.getOffsetReset()); + } + + if (StringUtils.isNotBlank(kafkaSourceTableInfo.getGroupId())) { + props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, kafkaSourceTableInfo.getGroupId()); + } + + for (String key : kafkaSourceTableInfo.getKafkaParamKeys()) { + props.setProperty(key, kafkaSourceTableInfo.getKafkaParam(key)); + } + return props; + } + + protected String generateOperatorName(String tabName, String topicName) { + return SOURCE_OPERATOR_NAME_TPL.replace("${topic}", topicName).replace("${table}", tabName); + } + + protected TypeInformation getRowTypeInformation(KafkaSourceTableInfo kafkaSourceTableInfo) { + Class[] fieldClasses = kafkaSourceTableInfo.getFieldClasses(); + TypeInformation[] types = IntStream.range(0, fieldClasses.length) + .mapToObj(i -> TypeInformation.of(fieldClasses[i])) + .toArray(TypeInformation[]::new); + + return new RowTypeInfo(types, kafkaSourceTableInfo.getFields()); + } + + protected void setStartPosition(String offset, String topicName, FlinkKafkaConsumerBase kafkaSrc) { + if (StringUtils.equalsIgnoreCase(offset, EKafkaOffset.EARLIEST.name())) { + kafkaSrc.setStartFromEarliest(); + } else if (DtStringUtil.isJson(offset)) { + Map specificStartupOffsets = buildOffsetMap(offset, topicName); + kafkaSrc.setStartFromSpecificOffsets(specificStartupOffsets); + } else { + kafkaSrc.setStartFromLatest(); + } + } + + /** + * kafka offset,eg.. {"0":12312,"1":12321,"2":12312} + * @param offsetJson + * @param topicName + * @return + */ + protected Map buildOffsetMap(String offsetJson, String topicName) { + try { + Properties properties = PluginUtil.jsonStrToObject(offsetJson, Properties.class); + Map offsetMap = PluginUtil.objectToMap(properties); + Map specificStartupOffsets = offsetMap + .entrySet() + .stream() + .collect(Collectors.toMap( + (Map.Entry entry) -> new KafkaTopicPartition(topicName, Integer.valueOf(entry.getKey())), + (Map.Entry entry) -> Long.valueOf(entry.getValue().toString())) + ); + + return specificStartupOffsets; + } catch (Exception e) { + throw new RuntimeException("not support offsetReset type:" + offsetJson); + } + } + +} diff --git a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/enums/EKafkaOffset.java b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/enums/EKafkaOffset.java new file mode 100644 index 000000000..476ccc8b7 --- /dev/null +++ b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/enums/EKafkaOffset.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.dtstack.flink.sql.source.kafka.enums; + +/** + * Date: 2020/3/20 + * Company: www.dtstack.com + * @author maqi + */ +public enum EKafkaOffset { + + LATEST, + EARLIEST, + NONE +} diff --git a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java index 8b0b9753c..867f48d6a 100644 --- a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java +++ b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java @@ -19,11 +19,13 @@ package com.dtstack.flink.sql.source.kafka.table; +import com.dtstack.flink.sql.source.kafka.enums.EKafkaOffset; import com.dtstack.flink.sql.table.AbstractSourceParser; import com.dtstack.flink.sql.table.AbstractTableInfo; import com.dtstack.flink.sql.util.MathUtil; import java.util.Map; +import java.util.stream.Collectors; /** * Reason: @@ -37,28 +39,27 @@ public class KafkaSourceParser extends AbstractSourceParser { public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map props) throws Exception { KafkaSourceTableInfo kafkaSourceTableInfo = new KafkaSourceTableInfo(); - kafkaSourceTableInfo.setName(tableName); - kafkaSourceTableInfo.setType(MathUtil.getString(props.get(KafkaSourceTableInfo.TYPE_KEY.toLowerCase()))); parseFieldsInfo(fieldsInfo, kafkaSourceTableInfo); + kafkaSourceTableInfo.setName(tableName); + kafkaSourceTableInfo.setType(MathUtil.getString(props.get(KafkaSourceTableInfo.TYPE_KEY.toLowerCase()))); kafkaSourceTableInfo.setParallelism(MathUtil.getIntegerVal(props.get(KafkaSourceTableInfo.PARALLELISM_KEY.toLowerCase()))); - String bootstrapServer = MathUtil.getString(props.get(KafkaSourceTableInfo.BOOTSTRAPSERVERS_KEY.toLowerCase())); - if (bootstrapServer == null || "".equals(bootstrapServer.trim())) { - throw new Exception("BootstrapServers can not be empty!"); - } else { - kafkaSourceTableInfo.setBootstrapServers(bootstrapServer); - } + kafkaSourceTableInfo.setBootstrapServers(MathUtil.getString(props.get(KafkaSourceTableInfo.BOOTSTRAPSERVERS_KEY.toLowerCase()))); kafkaSourceTableInfo.setGroupId(MathUtil.getString(props.get(KafkaSourceTableInfo.GROUPID_KEY.toLowerCase()))); kafkaSourceTableInfo.setTopic(MathUtil.getString(props.get(KafkaSourceTableInfo.TOPIC_KEY.toLowerCase()))); - kafkaSourceTableInfo.setOffsetReset(MathUtil.getString(props.get(KafkaSourceTableInfo.OFFSETRESET_KEY.toLowerCase()))); + kafkaSourceTableInfo.setOffsetReset(MathUtil.getString(props.getOrDefault(KafkaSourceTableInfo.OFFSETRESET_KEY.toLowerCase(), EKafkaOffset.LATEST.name().toLowerCase()))); kafkaSourceTableInfo.setTopicIsPattern(MathUtil.getBoolean(props.get(KafkaSourceTableInfo.TOPICISPATTERN_KEY.toLowerCase()))); kafkaSourceTableInfo.setTimeZone(MathUtil.getString(props.get(KafkaSourceTableInfo.TIME_ZONE_KEY.toLowerCase()))); - for (String key : props.keySet()) { - if (!key.isEmpty() && key.startsWith("kafka.")) { - 
kafkaSourceTableInfo.addKafkaParam(key.substring(6), props.get(key).toString()); - } - } + + Map kafkaParams = props.keySet().stream() + .filter(key -> !key.isEmpty() && key.startsWith("kafka.")) + .collect(Collectors.toMap( + key -> key.substring(6), key -> props.get(key).toString()) + ); + + kafkaSourceTableInfo.addKafkaParam(kafkaParams); kafkaSourceTableInfo.check(); + return kafkaSourceTableInfo; } } diff --git a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java index 364cbff49..e1c014a69 100644 --- a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java +++ b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java @@ -17,7 +17,6 @@ */ - package com.dtstack.flink.sql.source.kafka.table; import com.dtstack.flink.sql.format.FormatType; @@ -37,134 +36,118 @@ public class KafkaSourceTableInfo extends AbstractSourceTableInfo { - public static final String BOOTSTRAPSERVERS_KEY = "bootstrapServers"; - - public static final String TOPIC_KEY = "topic"; - - public static final String TYPE_KEY = "type"; + public static final String BOOTSTRAPSERVERS_KEY = "bootstrapServers"; - public static final String GROUPID_KEY = "groupId"; + public static final String TOPIC_KEY = "topic"; - public static final String OFFSETRESET_KEY = "offsetReset"; + public static final String TYPE_KEY = "type"; - public static final String TOPICISPATTERN_KEY = "topicIsPattern"; + public static final String GROUPID_KEY = "groupId"; - private String bootstrapServers; + public static final String OFFSETRESET_KEY = "offsetReset"; - private String topic; + public static final String TOPICISPATTERN_KEY = "topicIsPattern"; - private String groupId; + private String bootstrapServers; - //latest, earliest - private String offsetReset = "latest"; + private String topic; - private String offset; + private String groupId; - private Boolean topicIsPattern = false; + private String offsetReset; - private String sourceDataType = FormatType.DT_NEST.name(); + private Boolean topicIsPattern = false; - private String schemaString; + private String sourceDataType = FormatType.DT_NEST.name(); - private String fieldDelimiter; + private String schemaString; - public String getBootstrapServers() { - return bootstrapServers; - } + private String fieldDelimiter; - public void setBootstrapServers(String bootstrapServers) { - this.bootstrapServers = bootstrapServers; - } + public Map kafkaParam = new HashMap<>(); - public String getTopic() { - return topic; - } - public void setTopic(String topic) { - this.topic = topic; - } + public String getBootstrapServers() { + return bootstrapServers; + } - public String getGroupId() { - return groupId; - } + public void setBootstrapServers(String bootstrapServers) { + this.bootstrapServers = bootstrapServers; + } - public void setGroupId(String groupId) { - this.groupId = groupId; - } + public String getTopic() { + return topic; + } - public String getOffsetReset() { - return offsetReset; - } + public void setTopic(String topic) { + this.topic = topic; + } - public void setOffsetReset(String offsetReset) { - if(offsetReset == null){ - return; - } - this.offsetReset = offsetReset; - } + public String getGroupId() { + return groupId; + } - public String getOffset() { - return offset; - } + public void setGroupId(String groupId) { + 
this.groupId = groupId; + } - public void setOffset(String offset) { - if (offsetReset == null) { - return; - } - this.offset = offset; - } + public String getOffsetReset() { + return offsetReset; + } - public Boolean getTopicIsPattern() { - return topicIsPattern; - } + public void setOffsetReset(String offsetReset) { + this.offsetReset = offsetReset; + } - public void setTopicIsPattern(Boolean topicIsPattern) { - this.topicIsPattern = topicIsPattern; - } + public Boolean getTopicIsPattern() { + return topicIsPattern; + } - public Map kafkaParam = new HashMap<>(); + public void setTopicIsPattern(Boolean topicIsPattern) { + this.topicIsPattern = topicIsPattern; + } - public void addKafkaParam(String key, String value) { - kafkaParam.put(key, value); - } + public void addKafkaParam(Map kafkaParam) { + kafkaParam.putAll(kafkaParam); + } - public String getKafkaParam(String key) { - return kafkaParam.get(key); - } + public String getKafkaParam(String key) { + return kafkaParam.get(key); + } - public Set getKafkaParamKeys() { - return kafkaParam.keySet(); - } + public Set getKafkaParamKeys() { + return kafkaParam.keySet(); + } - public String getSourceDataType() { - return sourceDataType; - } + public String getSourceDataType() { + return sourceDataType; + } - public void setSourceDataType(String sourceDataType) { - this.sourceDataType = sourceDataType; - } + public void setSourceDataType(String sourceDataType) { + this.sourceDataType = sourceDataType; + } - public String getSchemaString() { - return schemaString; - } + public String getSchemaString() { + return schemaString; + } - public void setSchemaString(String schemaString) { - this.schemaString = schemaString; - } + public void setSchemaString(String schemaString) { + this.schemaString = schemaString; + } - public String getFieldDelimiter() { - return fieldDelimiter; - } + public String getFieldDelimiter() { + return fieldDelimiter; + } - public void setFieldDelimiter(String fieldDelimiter) { - this.fieldDelimiter = fieldDelimiter; - } + public void setFieldDelimiter(String fieldDelimiter) { + this.fieldDelimiter = fieldDelimiter; + } - @Override - public boolean check() { - Preconditions.checkNotNull(getType(), "kafka of type is required"); - Preconditions.checkNotNull(bootstrapServers, "kafka of bootstrapServers is required"); - Preconditions.checkNotNull(topic, "kafka of topic is required"); - return false; - } + @Override + public boolean check() { + Preconditions.checkNotNull(getType(), "kafka of type is required"); + Preconditions.checkNotNull(bootstrapServers, "kafka of bootstrapServers is required"); + Preconditions.checkNotNull(topic, "kafka of topic is required"); + return false; + } } diff --git a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java index 7e2d77c27..394ea86ee 100644 --- a/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java +++ b/kafka/kafka-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java @@ -18,24 +18,17 @@ package com.dtstack.flink.sql.source.kafka; -import com.dtstack.flink.sql.source.IStreamSourceGener; import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo; import com.dtstack.flink.sql.table.AbstractSourceTableInfo; -import com.dtstack.flink.sql.util.DtStringUtil; -import com.dtstack.flink.sql.util.PluginUtil; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; 
-import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.streaming.api.datastream.DataStreamSource; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer; -import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.java.StreamTableEnvironment; import org.apache.flink.types.Row; -import java.util.HashMap; -import java.util.Map; import java.util.Properties; /** @@ -43,75 +36,24 @@ * @create: 2019-11-05 10:55 * @description: **/ -public class KafkaSource implements IStreamSourceGener
{ +public class KafkaSource extends AbstractKafkaSource { - private static final String SOURCE_OPERATOR_NAME_TPL = "${topic}_${table}"; - - /** - * Get kafka data source, you need to provide the data field names, data types - * If you do not specify auto.offset.reset, the default use groupoffset - * - * @param sourceTableInfo - * @return - */ - @SuppressWarnings("rawtypes") @Override public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { - KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo; String topicName = kafkaSourceTableInfo.getTopic(); - Properties props = new Properties(); - for (String key : kafkaSourceTableInfo.getKafkaParamKeys()) { - props.setProperty(key, kafkaSourceTableInfo.getKafkaParam(key)); - } - props.setProperty("bootstrap.servers", kafkaSourceTableInfo.getBootstrapServers()); - if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) { - props.setProperty("auto.offset.reset", "none"); - } else { - props.setProperty("auto.offset.reset", kafkaSourceTableInfo.getOffsetReset()); - } - if (StringUtils.isNotBlank(kafkaSourceTableInfo.getGroupId())) { - props.setProperty("group.id", kafkaSourceTableInfo.getGroupId()); - } - - TypeInformation[] types = new TypeInformation[kafkaSourceTableInfo.getFields().length]; - for (int i = 0; i < kafkaSourceTableInfo.getFieldClasses().length; i++) { - types[i] = TypeInformation.of(kafkaSourceTableInfo.getFieldClasses()[i]); - } - - TypeInformation typeInformation = new RowTypeInfo(types, kafkaSourceTableInfo.getFields()); + Properties kafkaProperties = getKafkaProperties(kafkaSourceTableInfo); + TypeInformation typeInformation = getRowTypeInformation(kafkaSourceTableInfo); + FlinkKafkaConsumer kafkaSrc = (FlinkKafkaConsumer) new KafkaConsumerFactory().createKafkaTableSource(kafkaSourceTableInfo, typeInformation, kafkaProperties); - FlinkKafkaConsumer kafkaSrc = (FlinkKafkaConsumer) new KafkaConsumerFactory().createKafkaTableSource(kafkaSourceTableInfo, typeInformation, props); - - - //earliest,latest - if ("earliest".equalsIgnoreCase(kafkaSourceTableInfo.getOffsetReset())) { - kafkaSrc.setStartFromEarliest(); - } else if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) {// {"0":12312,"1":12321,"2":12312} - try { - Properties properties = PluginUtil.jsonStrToObject(kafkaSourceTableInfo.getOffsetReset(), Properties.class); - Map offsetMap = PluginUtil.objectToMap(properties); - Map specificStartupOffsets = new HashMap<>(); - for (Map.Entry entry : offsetMap.entrySet()) { - specificStartupOffsets.put(new KafkaTopicPartition(topicName, Integer.valueOf(entry.getKey())), Long.valueOf(entry.getValue().toString())); - } - kafkaSrc.setStartFromSpecificOffsets(specificStartupOffsets); - } catch (Exception e) { - throw new RuntimeException("not support offsetReset type:" + kafkaSourceTableInfo.getOffsetReset()); - } - } else { - kafkaSrc.setStartFromLatest(); - } + String sourceOperatorName = generateOperatorName(sourceTableInfo.getName(), topicName); + DataStreamSource kafkaSource = env.addSource(kafkaSrc, sourceOperatorName, typeInformation); + kafkaSource.setParallelism(kafkaSourceTableInfo.getParallelism()); + setStartPosition(kafkaSourceTableInfo.getOffsetReset(), topicName, kafkaSrc); String fields = StringUtils.join(kafkaSourceTableInfo.getFields(), ","); - String sourceOperatorName = SOURCE_OPERATOR_NAME_TPL.replace("${topic}", topicName).replace("${table}", sourceTableInfo.getName()); - DataStreamSource 
kafkaSource = env.addSource(kafkaSrc, sourceOperatorName, typeInformation); - Integer parallelism = kafkaSourceTableInfo.getParallelism(); - if (parallelism != null) { - kafkaSource.setParallelism(parallelism); - } return tableEnv.fromDataStream(kafkaSource, fields); } } diff --git a/kafka09/kafka09-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka09/kafka09-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java index ffb466a4c..9f8917761 100644 --- a/kafka09/kafka09-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java +++ b/kafka09/kafka09-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java @@ -17,17 +17,13 @@ */ - package com.dtstack.flink.sql.source.kafka; -import com.dtstack.flink.sql.source.IStreamSourceGener; import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo; import com.dtstack.flink.sql.table.AbstractSourceTableInfo; import com.dtstack.flink.sql.util.DtStringUtil; -import com.dtstack.flink.sql.util.PluginUtil; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.streaming.api.datastream.DataStreamSource; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer09; @@ -36,7 +32,6 @@ import org.apache.flink.table.api.java.StreamTableEnvironment; import org.apache.flink.types.Row; -import java.util.HashMap; import java.util.Map; import java.util.Properties; @@ -47,74 +42,23 @@ * @author xuchao */ -public class KafkaSource implements IStreamSourceGener
{ - - private static final String SOURCE_OPERATOR_NAME_TPL = "${topic}_${table}"; - - /** - * Get kafka data source, you need to provide the data field names, data types - * If you do not specify auto.offset.reset, the default use groupoffset - * @param sourceTableInfo - * @return - */ - @SuppressWarnings("rawtypes") +public class KafkaSource extends AbstractKafkaSource { @Override public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { - KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo; String topicName = kafkaSourceTableInfo.getTopic(); - Properties props = new Properties(); - for (String key : kafkaSourceTableInfo.getKafkaParamKeys()) { - props.setProperty(key, kafkaSourceTableInfo.getKafkaParam(key)); - } - props.setProperty("bootstrap.servers", kafkaSourceTableInfo.getBootstrapServers()); - if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())){ - props.setProperty("auto.offset.reset", "none"); - } else { - props.setProperty("auto.offset.reset", kafkaSourceTableInfo.getOffsetReset()); - } - if (StringUtils.isNotBlank(kafkaSourceTableInfo.getGroupId())){ - props.setProperty("group.id", kafkaSourceTableInfo.getGroupId()); - } - // only required for Kafka 0.8 - //TODO props.setProperty("zookeeper.connect", kafkaSourceTableInfo.) + Properties kafkaProperties = getKafkaProperties(kafkaSourceTableInfo); + TypeInformation typeInformation = getRowTypeInformation(kafkaSourceTableInfo); + FlinkKafkaConsumer09 kafkaSrc = (FlinkKafkaConsumer09) new KafkaConsumer09Factory().createKafkaTableSource(kafkaSourceTableInfo, typeInformation, kafkaProperties); - TypeInformation[] types = new TypeInformation[kafkaSourceTableInfo.getFields().length]; - for(int i = 0; i< kafkaSourceTableInfo.getFieldClasses().length; i++){ - types[i] = TypeInformation.of(kafkaSourceTableInfo.getFieldClasses()[i]); - } - - TypeInformation typeInformation = new RowTypeInfo(types, kafkaSourceTableInfo.getFields()); - FlinkKafkaConsumer09 kafkaSrc = (FlinkKafkaConsumer09) new KafkaConsumer09Factory().createKafkaTableSource(kafkaSourceTableInfo, typeInformation, props); - - //earliest,latest - if("earliest".equalsIgnoreCase(kafkaSourceTableInfo.getOffsetReset())){ - kafkaSrc.setStartFromEarliest(); - }else if(DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())){// {"0":12312,"1":12321,"2":12312} - try { - Properties properties = PluginUtil.jsonStrToObject(kafkaSourceTableInfo.getOffsetReset(), Properties.class); - Map offsetMap = PluginUtil.objectToMap(properties); - Map specificStartupOffsets = new HashMap<>(); - for(Map.Entry entry:offsetMap.entrySet()){ - specificStartupOffsets.put(new KafkaTopicPartition(topicName,Integer.valueOf(entry.getKey())),Long.valueOf(entry.getValue().toString())); - } - kafkaSrc.setStartFromSpecificOffsets(specificStartupOffsets); - } catch (Exception e) { - throw new RuntimeException("not support offsetReset type:" + kafkaSourceTableInfo.getOffsetReset()); - } - }else { - kafkaSrc.setStartFromLatest(); - } + String sourceOperatorName = generateOperatorName(sourceTableInfo.getName(), topicName); + DataStreamSource kafkaSource = env.addSource(kafkaSrc, sourceOperatorName, typeInformation); + kafkaSource.setParallelism(kafkaSourceTableInfo.getParallelism()); + setStartPosition(kafkaSourceTableInfo.getOffsetReset(), topicName, kafkaSrc); String fields = StringUtils.join(kafkaSourceTableInfo.getFields(), ","); - String sourceOperatorName = 
SOURCE_OPERATOR_NAME_TPL.replace("${topic}", topicName).replace("${table}", sourceTableInfo.getName()); - DataStreamSource kafkaSource = env.addSource(kafkaSrc, sourceOperatorName, typeInformation); - Integer parallelism = kafkaSourceTableInfo.getParallelism(); - if (parallelism != null) { - kafkaSource.setParallelism(parallelism); - } return tableEnv.fromDataStream(kafkaSource, fields); } } \ No newline at end of file diff --git a/kafka10/kafka10-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka10/kafka10-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java index ff4aed89d..23989ab7e 100644 --- a/kafka10/kafka10-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java +++ b/kafka10/kafka10-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java @@ -19,24 +19,17 @@ package com.dtstack.flink.sql.source.kafka; -import com.dtstack.flink.sql.source.IStreamSourceGener; import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo; import com.dtstack.flink.sql.table.AbstractSourceTableInfo; -import com.dtstack.flink.sql.util.DtStringUtil; -import com.dtstack.flink.sql.util.PluginUtil; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.streaming.api.datastream.DataStreamSource; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010; -import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.java.StreamTableEnvironment; import org.apache.flink.types.Row; -import java.util.HashMap; -import java.util.Map; import java.util.Properties; /** @@ -47,74 +40,24 @@ * @author sishu.yss */ -public class KafkaSource implements IStreamSourceGener
{ +public class KafkaSource extends AbstractKafkaSource { - private static final String SOURCE_OPERATOR_NAME_TPL = "${topic}_${table}"; + @Override + public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { + KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo; + String topicName = kafkaSourceTableInfo.getTopic(); - /** - * Get kafka data source, you need to provide the data field names, data types - * If you do not specify auto.offset.reset, the default use groupoffset - * - * @param sourceTableInfo - * @return - */ - @SuppressWarnings("rawtypes") - @Override - public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { + Properties kafkaProperties = getKafkaProperties(kafkaSourceTableInfo); + TypeInformation typeInformation = getRowTypeInformation(kafkaSourceTableInfo); + FlinkKafkaConsumer010 kafkaSrc = (FlinkKafkaConsumer010) new KafkaConsumer010Factory().createKafkaTableSource(kafkaSourceTableInfo, typeInformation, kafkaProperties); - KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo; - String topicName = kafkaSourceTableInfo.getTopic(); + String sourceOperatorName = generateOperatorName(sourceTableInfo.getName(), topicName); + DataStreamSource kafkaSource = env.addSource(kafkaSrc, sourceOperatorName, typeInformation); + kafkaSource.setParallelism(kafkaSourceTableInfo.getParallelism()); - Properties props = new Properties(); - for (String key : kafkaSourceTableInfo.getKafkaParamKeys()) { - props.setProperty(key, kafkaSourceTableInfo.getKafkaParam(key)); - } - props.setProperty("bootstrap.servers", kafkaSourceTableInfo.getBootstrapServers()); - if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())){ - props.setProperty("auto.offset.reset", "none"); - } else { - props.setProperty("auto.offset.reset", kafkaSourceTableInfo.getOffsetReset()); - } - if (StringUtils.isNotBlank(kafkaSourceTableInfo.getGroupId())){ - props.setProperty("group.id", kafkaSourceTableInfo.getGroupId()); - } + setStartPosition(kafkaSourceTableInfo.getOffsetReset(), topicName, kafkaSrc); + String fields = StringUtils.join(kafkaSourceTableInfo.getFields(), ","); - TypeInformation[] types = new TypeInformation[kafkaSourceTableInfo.getFields().length]; - for (int i = 0; i < kafkaSourceTableInfo.getFieldClasses().length; i++) { - types[i] = TypeInformation.of(kafkaSourceTableInfo.getFieldClasses()[i]); - } - - TypeInformation typeInformation = new RowTypeInfo(types, kafkaSourceTableInfo.getFields()); - - FlinkKafkaConsumer010 kafkaSrc = (FlinkKafkaConsumer010) new KafkaConsumer010Factory().createKafkaTableSource(kafkaSourceTableInfo, typeInformation, props); - - //earliest,latest - if ("earliest".equalsIgnoreCase(kafkaSourceTableInfo.getOffsetReset())) { - kafkaSrc.setStartFromEarliest(); - } else if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) {// {"0":12312,"1":12321,"2":12312} - try { - Properties properties = PluginUtil.jsonStrToObject(kafkaSourceTableInfo.getOffsetReset(), Properties.class); - Map offsetMap = PluginUtil.objectToMap(properties); - Map specificStartupOffsets = new HashMap<>(); - for (Map.Entry entry : offsetMap.entrySet()) { - specificStartupOffsets.put(new KafkaTopicPartition(topicName, Integer.valueOf(entry.getKey())), Long.valueOf(entry.getValue().toString())); - } - kafkaSrc.setStartFromSpecificOffsets(specificStartupOffsets); - } catch (Exception e) { - 
throw new RuntimeException("not support offsetReset type:" + kafkaSourceTableInfo.getOffsetReset()); - } - } else { - kafkaSrc.setStartFromLatest(); - } - - String fields = StringUtils.join(kafkaSourceTableInfo.getFields(), ","); - String sourceOperatorName = SOURCE_OPERATOR_NAME_TPL.replace("${topic}", topicName).replace("${table}", sourceTableInfo.getName()); - - DataStreamSource kafkaSource = env.addSource(kafkaSrc, sourceOperatorName, typeInformation); - Integer parallelism = kafkaSourceTableInfo.getParallelism(); - if (parallelism != null) { - kafkaSource.setParallelism(parallelism); - } - return tableEnv.fromDataStream(kafkaSource, fields); - } + return tableEnv.fromDataStream(kafkaSource, fields); + } } \ No newline at end of file diff --git a/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java b/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java index 11be1898a..f58d59d05 100644 --- a/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java +++ b/kafka11/kafka11-source/src/main/java/com/dtstack/flink/sql/source/kafka/KafkaSource.java @@ -19,24 +19,16 @@ package com.dtstack.flink.sql.source.kafka; -import com.dtstack.flink.sql.source.IStreamSourceGener; import com.dtstack.flink.sql.source.kafka.table.KafkaSourceTableInfo; import com.dtstack.flink.sql.table.AbstractSourceTableInfo; -import com.dtstack.flink.sql.util.DtStringUtil; -import com.dtstack.flink.sql.util.PluginUtil; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.streaming.api.datastream.DataStreamSource; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011; -import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition; import org.apache.flink.table.api.Table; import org.apache.flink.table.api.java.StreamTableEnvironment; import org.apache.flink.types.Row; - -import java.util.HashMap; -import java.util.Map; import java.util.Properties; /** @@ -47,76 +39,24 @@ * @author sishu.yss */ -public class KafkaSource implements IStreamSourceGener
{ - - private static final String SOURCE_OPERATOR_NAME_TPL = "${topic}_${table}"; - - /** - * Get kafka data source, you need to provide the data field names, data types - * If you do not specify auto.offset.reset, the default use groupoffset - * - * @param sourceTableInfo - * @return - */ - @SuppressWarnings("rawtypes") - @Override - public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { - - KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo; - String topicName = kafkaSourceTableInfo.getTopic(); - - Properties props = new Properties(); - for (String key : kafkaSourceTableInfo.getKafkaParamKeys()) { - props.setProperty(key, kafkaSourceTableInfo.getKafkaParam(key)); - } - props.setProperty("bootstrap.servers", kafkaSourceTableInfo.getBootstrapServers()); - if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())){ - props.setProperty("auto.offset.reset", "none"); - } else { - props.setProperty("auto.offset.reset", kafkaSourceTableInfo.getOffsetReset()); - } - if (StringUtils.isNotBlank(kafkaSourceTableInfo.getGroupId())){ - props.setProperty("group.id", kafkaSourceTableInfo.getGroupId()); - } - // only required for Kafka 0.8 - //TODO props.setProperty("zookeeper.connect", kafka09SourceTableInfo.) - - TypeInformation[] types = new TypeInformation[kafkaSourceTableInfo.getFields().length]; - for (int i = 0; i < kafkaSourceTableInfo.getFieldClasses().length; i++) { - types[i] = TypeInformation.of(kafkaSourceTableInfo.getFieldClasses()[i]); - } +public class KafkaSource extends AbstractKafkaSource { - TypeInformation typeInformation = new RowTypeInfo(types, kafkaSourceTableInfo.getFields()); + @Override + public Table genStreamSource(AbstractSourceTableInfo sourceTableInfo, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv) { + KafkaSourceTableInfo kafkaSourceTableInfo = (KafkaSourceTableInfo) sourceTableInfo; + String topicName = kafkaSourceTableInfo.getTopic(); - FlinkKafkaConsumer011 kafkaSrc = (FlinkKafkaConsumer011) new KafkaConsumer011Factory().createKafkaTableSource(kafkaSourceTableInfo, typeInformation, props); + Properties kafkaProperties = getKafkaProperties(kafkaSourceTableInfo); + TypeInformation typeInformation = getRowTypeInformation(kafkaSourceTableInfo); + FlinkKafkaConsumer011 kafkaSrc = (FlinkKafkaConsumer011) new KafkaConsumer011Factory().createKafkaTableSource(kafkaSourceTableInfo, typeInformation, kafkaProperties); - //earliest,latest - if ("earliest".equalsIgnoreCase(kafkaSourceTableInfo.getOffsetReset())) { - kafkaSrc.setStartFromEarliest(); - } else if (DtStringUtil.isJosn(kafkaSourceTableInfo.getOffsetReset())) {// {"0":12312,"1":12321,"2":12312} - try { - Properties properties = PluginUtil.jsonStrToObject(kafkaSourceTableInfo.getOffsetReset(), Properties.class); - Map offsetMap = PluginUtil.objectToMap(properties); - Map specificStartupOffsets = new HashMap<>(); - for (Map.Entry entry : offsetMap.entrySet()) { - specificStartupOffsets.put(new KafkaTopicPartition(topicName, Integer.valueOf(entry.getKey())), Long.valueOf(entry.getValue().toString())); - } - kafkaSrc.setStartFromSpecificOffsets(specificStartupOffsets); - } catch (Exception e) { - throw new RuntimeException("not support offsetReset type:" + kafkaSourceTableInfo.getOffsetReset()); - } - } else { - kafkaSrc.setStartFromLatest(); - } + String sourceOperatorName = generateOperatorName(sourceTableInfo.getName(), topicName); + DataStreamSource kafkaSource = env.addSource(kafkaSrc, 
sourceOperatorName, typeInformation); + kafkaSource.setParallelism(kafkaSourceTableInfo.getParallelism()); - String fields = StringUtils.join(kafkaSourceTableInfo.getFields(), ","); - String sourceOperatorName = SOURCE_OPERATOR_NAME_TPL.replace("${topic}", topicName).replace("${table}", sourceTableInfo.getName()); + setStartPosition(kafkaSourceTableInfo.getOffsetReset(), topicName, kafkaSrc); + String fields = StringUtils.join(kafkaSourceTableInfo.getFields(), ","); - DataStreamSource kafkaSource = env.addSource(kafkaSrc, sourceOperatorName, typeInformation); - Integer parallelism = kafkaSourceTableInfo.getParallelism(); - if (parallelism != null) { - kafkaSource.setParallelism(parallelism); - } - return tableEnv.fromDataStream(kafkaSource, fields); - } + return tableEnv.fromDataStream(kafkaSource, fields); + } } From 0b22f9c7d5c776ad7356b731c95a8704e4dd5530 Mon Sep 17 00:00:00 2001 From: maqi Date: Fri, 20 Mar 2020 17:20:46 +0800 Subject: [PATCH 35/47] add avro params --- docs/kafkaSource.md | 224 ++---------------- .../source/kafka/table/KafkaSourceParser.java | 5 + .../kafka/table/KafkaSourceTableInfo.java | 8 +- 3 files changed, 38 insertions(+), 199 deletions(-) diff --git a/docs/kafkaSource.md b/docs/kafkaSource.md index 76096aba6..00c252c99 100644 --- a/docs/kafkaSource.md +++ b/docs/kafkaSource.md @@ -1,6 +1,5 @@ ## 1.格式: ``` -数据现在支持json格式{"xx":"bb","cc":"dd"} CREATE TABLE tableName( colName colType, @@ -15,9 +14,8 @@ CREATE TABLE tableName( topic ='topicName', groupId='test', parallelism ='parllNum', - --timezone='America/Los_Angeles', timezone='Asia/Shanghai', - sourcedatatype ='json' #可不设置 + sourcedatatype ='dt_nest' #可不设置 ); ``` @@ -47,7 +45,9 @@ CREATE TABLE tableName( |topicIsPattern | topic是否是正则表达式格式(true|false) |否| false |offsetReset | 读取的topic 的offset初始位置[latest|earliest|指定offset值({"0":12312,"1":12321,"2":12312},{"partition_no":offset_value})]|否|latest| |parallelism | 并行度设置|否|1| -|sourcedatatype | 数据类型|否|json| +|sourcedatatype | 数据类型,avro,csv,json,dt_nest。dt_nest为默认JSON解析器,能够解析嵌套JSON数据类型,其他仅支持非嵌套格式|否|dt_nest| +|schemaInfo | avro类型使用的schema信息|否|| +|fieldDelimiter |csv类型使用的数据分隔符|否| | | |timezone|时区设置[timezone支持的参数](timeZone.md)|否|'Asia/Shanghai' **kafka相关参数可以自定义,使用kafka.开头即可。** ``` @@ -169,24 +169,10 @@ CREATE TABLE MyTable( parallelism ='1' ); ``` -# 二、csv格式数据源 -根据字段分隔符进行数据分隔,按顺序匹配sql中配置的列。如数据分隔列数和sql中配置的列数相等直接匹配;如不同参照lengthcheckpolicy策略处理。 -## 1.参数: - -|参数名称|含义|是否必填|默认值| -|----|---|---|---| -|type | kafka09 | 是|| -|bootstrapServers | kafka bootstrap-server 地址信息(多个用逗号隔开)|是|| -|zookeeperQuorum | kafka zk地址信息(多个之间用逗号分隔)|是|| -|topic | 需要读取的 topic 名称|是|| -|offsetReset | 读取的topic 的offset初始位置[latest|earliest]|否|latest| -|parallelism | 并行度设置 |否|1| -|sourcedatatype | 数据类型|是 |csv| -|fielddelimiter | 字段分隔符|是 || -|lengthcheckpolicy | 单行字段条数检查策略 |否|可选,默认为SKIP,其它可选值为EXCEPTION、PAD。SKIP:字段数目不符合时跳过 。EXCEPTION:字段数目不符合时抛出异常。PAD:按顺序填充,不存在的置为null。| -**kafka相关参数可以自定义,使用kafka.开头即可。** -## 2.样例: +## 7.csv格式数据源 + + ``` CREATE TABLE MyTable( name varchar, @@ -203,186 +189,28 @@ CREATE TABLE MyTable( --topic ='mqTest.*', --topicIsPattern='true' parallelism ='1', - sourcedatatype ='csv', - fielddelimiter ='\|', - lengthcheckpolicy = 'PAD' + sourceDatatype ='csv' ); ``` -# 三、text格式数据源UDF自定义拆分 -Kafka源表数据解析流程:Kafka Source Table -> UDTF ->Realtime Compute -> SINK。从Kakfa读入的数据,都是VARBINARY(二进制)格式,对读入的每条数据,都需要用UDTF将其解析成格式化数据。 - 与其他格式不同,本格式定义DDL必须与以下SQL一摸一样,表中的五个字段顺序务必保持一致: - -## 1. 
定义源表,注意:kafka源表DDL字段必须与以下例子一模一样。WITH中参数可改。 -``` -create table kafka_stream( - _topic STRING, - _messageKey STRING, - _message STRING, - _partition INT, - _offset BIGINT, -) with ( - type ='kafka09', - bootstrapServers ='172.16.8.198:9092', - zookeeperQuorum ='172.16.8.198:2181/kafka', - offsetReset ='latest', - topic ='nbTest1', - parallelism ='1', - sourcedatatype='text' - ) -``` -## 2.参数: - -|参数名称|含义|是否必填|默认值| -|----|---|---|---| -|type | kafka09 | 是|| -|bootstrapServers | kafka bootstrap-server 地址信息(多个用逗号隔开)|是|| -|zookeeperQuorum | kafka zk地址信息(多个之间用逗号分隔)|是|| -|topic | 需要读取的 topic 名称|是|| -|offsetReset | 读取的topic 的offset初始位置[latest|earliest]|否|latest| -|parallelism | 并行度设置|否|1| -|sourcedatatype | 数据类型|否|text| -**kafka相关参数可以自定义,使用kafka.开头即可。** +## 8.avro格式数据源 -## 2.自定义: -从kafka读出的数据,需要进行窗口计算。 按照实时计算目前的设计,滚窗/滑窗等窗口操作,需要(且必须)在源表DDL上定义Watermark。Kafka源表比较特殊。如果要以kafka中message字段中的的Event Time进行窗口操作, -需要先从message字段,使用UDX解析出event time,才能定义watermark。 在kafka源表场景中,需要使用计算列。 假设,kafka中写入的数据如下: -2018-11-11 00:00:00|1|Anna|female整个计算流程为:Kafka SOURCE->UDTF->Realtime Compute->RDS SINK(单一分隔符可直接使用类csv格式模板,自定义适用于更复杂的数据类型,本说明只做参考) - -**SQL** ``` --- 定义解析Kakfa message的UDTF - CREATE FUNCTION kafkapaser AS 'com.XXXX.kafkaUDTF'; - CREATE FUNCTION kafkaUDF AS 'com.XXXX.kafkaUDF'; - -- 定义源表,注意:kafka源表DDL字段必须与以下例子一模一样。WITH中参数可改。 - create table kafka_src ( - _topic STRING, - _messageKey STRING, - _message STRING, - _partition INT, - _offset BIGINT, - ctime AS TO_TIMESTAMP(kafkaUDF(_message)), -- 定义计算列,计算列可理解为占位符,源表中并没有这一列,其中的数据可经过下游计算得出。注意计算里的类型必须为timestamp才能在做watermark。 - watermark for ctime as withoffset(ctime,0) -- 在计算列上定义watermark - ) WITH ( - type = 'kafka010', -- Kafka Source类型,与Kafka版本强相关,目前支持的Kafka版本请参考本文档 - topic = 'test_kafka_topic', - ... - ); - create table rds_sink ( - name VARCHAR, - age INT, - grade VARCHAR, - updateTime TIMESTAMP - ) WITH( - type='mysql', - url='jdbc:mysql://localhost:3306/test', - tableName='test4', - userName='test', - password='XXXXXX' +CREATE TABLE MyTable( + channel varchar, + pv varchar + --xctime bigint + )WITH( + type='kafka', + bootstrapServers='172.16.8.107:9092', + groupId='mqTest01', + offsetReset='latest', + topic='mqTest01', + parallelism ='1', + topicIsPattern ='false', + kafka.group.id='mqTest', + sourceDataType ='avro', + schemaInfo = '{"type":"record","name":"MyResult","fields":[{"name":"channel","type":"string"},{"name":"pv","type":"string"}]}' ); - -- 使用UDTF,将二进制数据解析成格式化数据 - CREATE VIEW input_view ( - name, - age, - grade, - updateTime - ) AS - SELECT - COUNT(*) as cnt, - T.ctime, - T.order, - T.name, - T.sex - from - kafka_src as S, - LATERAL TABLE (kafkapaser _message)) as T ( - ctime, - order, - name, - sex - ) - Group BY T.sex, - TUMBLE(ctime, INTERVAL '1' MINUTE); - -- 对input_view中输出的数据做计算 - CREATE VIEW view2 ( - cnt, - sex - ) AS - SELECT - COUNT(*) as cnt, - T.sex - from - input_view - Group BY sex, TUMBLE(ctime, INTERVAL '1' MINUTE); - -- 使用解析出的格式化数据进行计算,并将结果输出到RDS中 - insert into rds_sink - SELECT - cnt,sex - from view2; - ``` -**UDF&UDTF** + ``` -package com.XXXX; - import com.XXXX.fastjson.JSONObject; - import org.apache.flink.table.functions.TableFunction; - import org.apache.flink.table.types.DataType; - import org.apache.flink.table.types.DataTypes; - import org.apache.flink.types.Row; - import java.io.UnsupportedEncodingException; - /** - 以下例子解析输入Kafka中的JSON字符串,并将其格式化输出 - **/ - public class kafkaUDTF extends TableFunction { - public void eval(byte[] message) { - try { - // 读入一个二进制数据,并将其转换为String格式 - String msg = new String(message, "UTF-8"); - // 提取JSON 
Object中各字段 - String ctime = Timestamp.valueOf(data.split('\\|')[0]); - String order = data.split('\\|')[1]; - String name = data.split('\\|')[2]; - String sex = data.split('\\|')[3]; - // 将解析出的字段放到要输出的Row()对象 - Row row = new Row(4); - row.setField(0, ctime); - row.setField(1, age); - row.setField(2, grade); - row.setField(3, updateTime); - System.out.println("Kafka message str ==>" + row.toString()); - // 输出一行 - collect(row); - } catch (ClassCastException e) { - System.out.println("Input data format error. Input data " + msg + "is not json string"); - } - } catch (UnsupportedEncodingException e) { - e.printStackTrace(); - } - } - @Override - // 如果返回值是Row,就必须重载实现这个方法,显式地告诉系统返回的字段类型 - // 定义输出Row()对象的字段类型 - public DataType getResultType(Object[] arguments, Class[] argTypes) { - return DataTypes.createRowType(DataTypes.TIMESTAMP,DataTypes.STRING, DataTypes.Integer, DataTypes.STRING,DataTypes.STRING); - } - } - - package com.dp58; - package com.dp58.sql.udx; - import org.apache.flink.table.functions.FunctionContext; - import org.apache.flink.table.functions.ScalarFunction; - public class KafkaUDF extends ScalarFunction { - // 可选,open方法可以不写 - // 需要import org.apache.flink.table.functions.FunctionContext; - public String eval(byte[] message) { - // 读入一个二进制数据,并将其转换为String格式 - String msg = new String(message, "UTF-8"); - return msg.split('\\|')[0]; - } - public long eval(String b, String c) { - return eval(b) + eval(c); - } - //可选,close方法可以不写 - @Override - public void close() { - } - } - ``` + diff --git a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java index 867f48d6a..96ccd1783 100644 --- a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java +++ b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceParser.java @@ -19,6 +19,7 @@ package com.dtstack.flink.sql.source.kafka.table; +import com.dtstack.flink.sql.format.FormatType; import com.dtstack.flink.sql.source.kafka.enums.EKafkaOffset; import com.dtstack.flink.sql.table.AbstractSourceParser; import com.dtstack.flink.sql.table.AbstractTableInfo; @@ -51,6 +52,10 @@ public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map kafkaParams = props.keySet().stream() .filter(key -> !key.isEmpty() && key.startsWith("kafka.")) .collect(Collectors.toMap( diff --git a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java index e1c014a69..c27eee376 100644 --- a/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java +++ b/kafka-base/kafka-base-source/src/main/java/com/dtstack/flink/sql/source/kafka/table/KafkaSourceTableInfo.java @@ -48,6 +48,12 @@ public class KafkaSourceTableInfo extends AbstractSourceTableInfo { public static final String TOPICISPATTERN_KEY = "topicIsPattern"; + public static final String SCHEMA_STRING_KEY = "schemaInfo"; + + public static final String CSV_FIELD_DELIMITER_KEY = "fieldDelimiter"; + + public static final String SOURCE_DATA_TYPE_KEY = "sourceDataType"; + private String bootstrapServers; private String topic; @@ -58,7 +64,7 @@ public class KafkaSourceTableInfo extends AbstractSourceTableInfo { private Boolean topicIsPattern = 
false; - private String sourceDataType = FormatType.DT_NEST.name(); + private String sourceDataType; private String schemaString; From ad272dee5efa5225fc0d8e1183144396b70f1315 Mon Sep 17 00:00:00 2001 From: maqi Date: Wed, 25 Mar 2020 15:07:30 +0800 Subject: [PATCH 36/47] fix sonarqube scan --- .../side/cassandra/CassandraAsyncReqRow.java | 3 +-- .../cassandra/CassandraAsyncSideInfo.java | 8 +++++-- .../sql/sink/console/ConsoleOutputFormat.java | 2 +- .../sink/console/table/TablePrintUtil.java | 2 +- .../flink/sql/exec/ExecuteProcessHelper.java | 9 ++------ .../DtNestRowDeserializationSchema.java | 3 ++- .../flink/sql/option/OptionParser.java | 21 +++++++------------ .../dtstack/flink/sql/side/BaseAllReqRow.java | 6 +++++- .../flink/sql/side/JoinNodeDealer.java | 11 +++++----- .../dtstack/flink/sql/side/SideSQLParser.java | 18 +--------------- .../dtstack/flink/sql/side/SideSqlExec.java | 10 ++------- .../com/dtstack/flink/sql/util/MathUtil.java | 2 +- .../Elasticsearch6AsyncReqRow.java | 7 ++++++- .../flink/sql/side/hbase/HbaseAllReqRow.java | 14 ++++++++++--- .../PreRowKeyModeDealerDealer.java | 3 +-- .../rowkeydealer/RowKeyEqualModeDealer.java | 3 +-- .../sink/kafka/CustomerFlinkPartition.java | 8 +++---- .../sql/launcher/ClusterClientFactory.java | 4 ++-- .../perjob/PerJobClusterClientBuilder.java | 3 ++- .../sql/launcher/perjob/PerJobSubmitter.java | 1 - .../sql/side/rdb/all/RdbAllSideInfo.java | 5 ++++- .../sql/side/rdb/async/RdbAsyncSideInfo.java | 6 +++++- .../rdb/format/JDBCUpsertOutputFormat.java | 1 - .../sink/rdb/writer/AbstractUpsertWriter.java | 3 --- 24 files changed, 71 insertions(+), 82 deletions(-) diff --git a/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncReqRow.java b/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncReqRow.java index e1ed6d8e1..7eee3cb5f 100644 --- a/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncReqRow.java +++ b/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncReqRow.java @@ -216,7 +216,7 @@ public void asyncInvoke(CRow input, ResultFuture resultFuture) throws Exce connCassandraDB(cassandraSideTableInfo); String sqlCondition = sideInfo.getSqlCondition() + " " + sqlWhere + " ALLOW FILTERING "; - System.out.println("sqlCondition:" + sqlCondition); + LOG.info("sqlCondition:{}" + sqlCondition); ListenableFuture resultSet = Futures.transformAsync(session, new AsyncFunction() { @@ -265,7 +265,6 @@ public void onSuccess(List rows) { public void onFailure(Throwable t) { LOG.error("Failed to retrieve the data: %s%n", t.getMessage()); - System.out.println("Failed to retrieve the data: " + t.getMessage()); cluster.closeAsync(); resultFuture.completeExceptionally(t); } diff --git a/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncSideInfo.java b/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncSideInfo.java index 82055b94c..4bee5648b 100644 --- a/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncSideInfo.java +++ b/cassandra/cassandra-side/cassandra-async-side/src/main/java/com/dtstack/flink/sql/side/cassandra/CassandraAsyncSideInfo.java @@ -30,6 +30,8 @@ import org.apache.calcite.sql.SqlNode; import 
org.apache.flink.api.java.typeutils.RowTypeInfo; import com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.List; @@ -42,6 +44,8 @@ public class CassandraAsyncSideInfo extends BaseSideInfo { private static final long serialVersionUID = -4403313049809013362L; + private static final Logger LOG = LoggerFactory.getLogger(CassandraAsyncSideInfo.class.getSimpleName()); + public CassandraAsyncSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); @@ -63,9 +67,9 @@ public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInf } sqlCondition = "select ${selectField} from ${tableName}"; - sqlCondition = sqlCondition.replace("${tableName}", cassandraSideTableInfo.getDatabase()+"."+cassandraSideTableInfo.getTableName()).replace("${selectField}", sideSelectFields); - System.out.println("---------side_exe_sql-----\n" + sqlCondition); + + LOG.info("---------side_exe_sql-----\n{}" + sqlCondition); } diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java index e99c76d03..fc5a768c4 100644 --- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java +++ b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/ConsoleOutputFormat.java @@ -56,7 +56,7 @@ public void open(int taskNumber, int numTasks) throws IOException { @Override public void writeRecord(Tuple2 tuple2) throws IOException { - System.out.println("received oriainal data:" + tuple2); + LOG.info("received oriainal data:{}" + tuple2); Tuple2 tupleTrans = tuple2; Boolean retract = tupleTrans.getField(0); if (!retract) { diff --git a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java index 6ddc60386..12bb2a5d4 100644 --- a/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java +++ b/console/console-sink/src/main/java/com/dtstack/flink/sql/sink/console/table/TablePrintUtil.java @@ -95,7 +95,7 @@ public static TablePrintUtil build(List data) { try { value = obj.getClass().getMethod(colList.get(j).getMethodName).invoke(data.get(i)).toString(); } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException e) { - e.printStackTrace(); + LOG.error("", e); } item[j] = value == null ? 
"null" : value; } diff --git a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java index c02c3b142..dbe38044e 100644 --- a/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java +++ b/core/src/main/java/com/dtstack/flink/sql/exec/ExecuteProcessHelper.java @@ -95,11 +95,8 @@ public class ExecuteProcessHelper { public static ParamsInfo parseParams(String[] args) throws Exception { LOG.info("------------program params-------------------------"); - System.out.println("------------program params-------------------------"); Arrays.stream(args).forEach(arg -> LOG.info("{}", arg)); - Arrays.stream(args).forEach(System.out::println); LOG.info("-------------------------------------------"); - System.out.println("----------------------------------------"); OptionParser optionParser = new OptionParser(args); Options options = optionParser.getOptions(); @@ -228,12 +225,10 @@ private static void sqlTranslation(String localSqlPluginPath, //sql-dimensional table contains the dimension table of execution sideSqlExec.exec(result.getExecSql(), sideTableMap, tableEnv, registerTableCache, queryConfig, null); } else { - System.out.println("----------exec sql without dimension join-----------"); - System.out.println("----------real sql exec is--------------------------"); - System.out.println(result.getExecSql()); + LOG.info("----------exec sql without dimension join-----------"); + LOG.info("----------real sql exec is--------------------------\n{}", result.getExecSql()); FlinkSQLExec.sqlUpdate(tableEnv, result.getExecSql(), queryConfig); if (LOG.isInfoEnabled()) { - System.out.println(); LOG.info("exec sql: " + result.getExecSql()); } } diff --git a/core/src/main/java/com/dtstack/flink/sql/format/dtnest/DtNestRowDeserializationSchema.java b/core/src/main/java/com/dtstack/flink/sql/format/dtnest/DtNestRowDeserializationSchema.java index 4c0d68eb2..5f1b1c6f3 100644 --- a/core/src/main/java/com/dtstack/flink/sql/format/dtnest/DtNestRowDeserializationSchema.java +++ b/core/src/main/java/com/dtstack/flink/sql/format/dtnest/DtNestRowDeserializationSchema.java @@ -28,8 +28,9 @@ import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonProcessingException; import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode; import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper; -import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.*; import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ArrayNode; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.NullNode; +import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.flink.types.Row; import java.io.IOException; diff --git a/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java b/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java index e49adfd93..0fd057c5a 100644 --- a/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/option/OptionParser.java @@ -31,7 +31,10 @@ import java.io.File; import java.io.FileInputStream; import java.net.URLEncoder; +import java.util.stream.Stream; + import org.apache.commons.codec.Charsets; +import org.apache.flink.util.FileUtils; /** @@ -92,19 +95,16 @@ public Options getOptions(){ } public List getProgramExeArgList() throws Exception { - Map mapConf = 
PluginUtil.objectToMap(properties); + Map mapConf = PluginUtil.objectToMap(properties); List args = Lists.newArrayList(); - for(Map.Entry one : mapConf.entrySet()){ + for (Map.Entry one : mapConf.entrySet()) { String key = one.getKey(); Object value = one.getValue(); - if(value == null){ + if (value == null) { continue; - }else if(OPTION_SQL.equalsIgnoreCase(key)){ + } else if (OPTION_SQL.equalsIgnoreCase(key)) { File file = new File(value.toString()); - FileInputStream in = new FileInputStream(file); - byte[] filecontent = new byte[(int) file.length()]; - in.read(filecontent); - String content = new String(filecontent, Charsets.UTF_8.name()); + String content = FileUtils.readFile(file, "UTF-8"); value = URLEncoder.encode(content, Charsets.UTF_8.name()); } args.add("-" + key); @@ -112,9 +112,4 @@ public List getProgramExeArgList() throws Exception { } return args; } - - public static void main(String[] args) throws Exception { - OptionParser optionParser = new OptionParser(args); - System.out.println(optionParser.getOptions()); - } } diff --git a/core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java b/core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java index 674239449..0b7fe15b3 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/BaseAllReqRow.java @@ -27,6 +27,8 @@ import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.types.Row; import org.apache.flink.util.Collector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.sql.SQLException; import java.util.concurrent.ScheduledExecutorService; @@ -42,6 +44,8 @@ public abstract class BaseAllReqRow extends RichFlatMapFunction implements ISideReqRow { + private static final Logger LOG = LoggerFactory.getLogger(BaseAllReqRow.class); + protected BaseSideInfo sideInfo; private ScheduledExecutorService es; @@ -59,7 +63,7 @@ public BaseAllReqRow(BaseSideInfo sideInfo){ public void open(Configuration parameters) throws Exception { super.open(parameters); initCache(); - System.out.println("----- all cacheRef init end-----"); + LOG.info("----- all cacheRef init end-----"); //start reload cache thread AbstractSideTableInfo sideTableInfo = sideInfo.getSideTableInfo(); diff --git a/core/src/main/java/com/dtstack/flink/sql/side/JoinNodeDealer.java b/core/src/main/java/com/dtstack/flink/sql/side/JoinNodeDealer.java index a1c3cecb1..411de9380 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/JoinNodeDealer.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/JoinNodeDealer.java @@ -21,6 +21,7 @@ import com.dtstack.flink.sql.config.CalciteConfig; import com.dtstack.flink.sql.util.TableUtils; +import com.esotericsoftware.minlog.Log; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Sets; @@ -264,11 +265,10 @@ private void extractTemporaryQuery(SqlNode node, String tableAlias, SqlBasicCall SqlBasicCall sqlBasicCall = buildAsSqlNode(tableAlias, sqlNode); queueInfo.offer(sqlBasicCall); - System.out.println("-------build temporary query-----------"); - System.out.println(tmpSelectSql); - System.out.println("---------------------------------------"); - }catch (Exception e){ - e.printStackTrace(); + Log.info("-------build temporary query-----------\n{}", tmpSelectSql); + Log.info("---------------------------------------"); + } catch (Exception e) { + Log.error("", e); throw new RuntimeException(e); } } @@ -389,7 +389,6 @@ private void 
extractSelectField(SqlNode selectNode, } }else if(selectNode.getKind() == CASE){ - System.out.println("selectNode"); SqlCase sqlCase = (SqlCase) selectNode; SqlNodeList whenOperands = sqlCase.getWhenOperands(); SqlNodeList thenOperands = sqlCase.getThenOperands(); diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java b/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java index 9be1551aa..d21effb0c 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/SideSQLParser.java @@ -22,46 +22,32 @@ import com.dtstack.flink.sql.config.CalciteConfig; import com.dtstack.flink.sql.util.TableUtils; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Queues; import com.google.common.collect.Sets; -import org.apache.calcite.sql.JoinType; -import org.apache.calcite.sql.SqlAsOperator; import org.apache.calcite.sql.SqlBasicCall; -import org.apache.calcite.sql.SqlBinaryOperator; -import org.apache.calcite.sql.SqlDataTypeSpec; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlInsert; import org.apache.calcite.sql.SqlJoin; import org.apache.calcite.sql.SqlKind; -import org.apache.calcite.sql.SqlLiteral; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlNodeList; -import org.apache.calcite.sql.SqlOperator; import org.apache.calcite.sql.SqlOrderBy; import org.apache.calcite.sql.SqlSelect; import org.apache.calcite.sql.SqlWith; import org.apache.calcite.sql.SqlWithItem; -import org.apache.calcite.sql.fun.SqlCase; -import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.sql.parser.SqlParseException; import org.apache.calcite.sql.parser.SqlParser; -import org.apache.calcite.sql.parser.SqlParserPos; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.table.api.Table; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; -import static org.apache.calcite.sql.SqlKind.*; +import static org.apache.calcite.sql.SqlKind.IDENTIFIER; /** * Parsing sql, obtain execution information dimension table @@ -76,8 +62,6 @@ public class SideSQLParser { private Map localTableCache = Maps.newHashMap(); public Queue getExeQueue(String exeSql, Set sideTableSet) throws SqlParseException { - System.out.println("----------exec original Sql----------"); - System.out.println(exeSql); LOG.info("----------exec original Sql----------"); LOG.info(exeSql); diff --git a/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java b/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java index e828bec03..1a7b05416 100644 --- a/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java +++ b/core/src/main/java/com/dtstack/flink/sql/side/SideSqlExec.java @@ -143,11 +143,9 @@ public void exec(String sql, Map sideTableMap, St } if(pollSqlNode.getKind() == INSERT){ - System.out.println("----------real exec sql-----------" ); - System.out.println(pollSqlNode.toString()); FlinkSQLExec.sqlUpdate(tableEnv, pollSqlNode.toString(), queryConfig); if(LOG.isInfoEnabled()){ - LOG.info("exec sql: " + pollSqlNode.toString()); + LOG.info("----------real exec sql-----------\n{}", pollSqlNode.toString()); } }else if(pollSqlNode.getKind() == AS){ @@ -178,8 
+176,7 @@ public void exec(String sql, Map sideTableMap, St } }else if (pollObj instanceof JoinInfo){ - System.out.println("----------exec join info----------"); - System.out.println(pollObj.toString()); + LOG.info("----------exec join info----------\n{}", pollObj.toString()); preIsSideJoin = true; joinFun(pollObj, localTableCache, sideTableMap, tableEnv, replaceInfoList); } @@ -408,8 +405,6 @@ private void replaceFieldName(SqlNode sqlNode, FieldReplaceInfo replaceInfo) { } } }else{ - //TODO - System.out.println(sqlNode); throw new RuntimeException("---not deal type:" + sqlNode); } @@ -644,7 +639,6 @@ private SqlNode replaceSelectFieldName(SqlNode selectNode, FieldReplaceInfo repl return selectNode; }else if(selectNode.getKind() == CASE){ - System.out.println("selectNode"); SqlCase sqlCase = (SqlCase) selectNode; SqlNodeList whenOperands = sqlCase.getWhenOperands(); SqlNodeList thenOperands = sqlCase.getThenOperands(); diff --git a/core/src/main/java/com/dtstack/flink/sql/util/MathUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/MathUtil.java index 8708f86d7..966b82e3e 100644 --- a/core/src/main/java/com/dtstack/flink/sql/util/MathUtil.java +++ b/core/src/main/java/com/dtstack/flink/sql/util/MathUtil.java @@ -219,7 +219,7 @@ public static BigDecimal getBigDecimal(Object obj) { } else if (obj instanceof BigInteger) { return new BigDecimal((BigInteger) obj); } else if (obj instanceof Number) { - return new BigDecimal(((Number) obj).doubleValue()); + return BigDecimal.valueOf(((Number) obj).doubleValue()); } throw new RuntimeException("not support type of " + obj.getClass() + " convert to BigDecimal."); } diff --git a/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncReqRow.java b/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncReqRow.java index e8164edb2..1f4bd1bf1 100644 --- a/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncReqRow.java +++ b/elasticsearch6/elasticsearch6-side/elasticsearch6-async-side/src/main/java/com/dtstack/flink/sql/side/elasticsearch6/Elasticsearch6AsyncReqRow.java @@ -18,6 +18,12 @@ package com.dtstack.flink.sql.side.elasticsearch6; +import com.dtstack.flink.sql.side.AbstractSideTableInfo; +import com.dtstack.flink.sql.side.BaseAsyncReqRow; +import com.dtstack.flink.sql.side.CacheMissVal; +import com.dtstack.flink.sql.side.FieldInfo; +import com.dtstack.flink.sql.side.JoinInfo; +import com.dtstack.flink.sql.side.PredicateInfo; import org.apache.flink.api.java.typeutils.RowTypeInfo; import org.apache.flink.configuration.Configuration; import org.apache.flink.streaming.api.functions.async.ResultFuture; @@ -26,7 +32,6 @@ import org.apache.flink.types.Row; import com.dtstack.flink.sql.enums.ECacheContentType; -import com.dtstack.flink.sql.side.*; import com.dtstack.flink.sql.side.cache.CacheObj; import com.dtstack.flink.sql.side.elasticsearch6.table.Elasticsearch6SideTableInfo; import com.dtstack.flink.sql.side.elasticsearch6.util.Es6Util; diff --git a/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java b/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java index 6f7720472..ed2931fff 100644 --- a/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java +++ 
b/hbase/hbase-side/hbase-all-side/src/main/java/com/dtstack/flink/sql/side/hbase/HbaseAllReqRow.java @@ -182,9 +182,17 @@ private void loadData(Map> tmpCache) throws SQLExcep LOG.error("", e); } finally { try { - conn.close(); - table.close(); - resultScanner.close(); + if (null != conn && !conn.isClosed()) { + conn.close(); + } + + if (null != table) { + table.close(); + } + + if (null != resultScanner) { + resultScanner.close(); + } } catch (IOException e) { LOG.error("", e); } diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java index 3fad216b2..dcd50131d 100644 --- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java +++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/PreRowKeyModeDealerDealer.java @@ -110,8 +110,7 @@ private String dealOneRow(ArrayList> args, String rowKeyStr, for (String key : colNames) { Object val = sideMap.get(key); if (val == null) { - System.out.println("can't get data with column " + key); - LOG.error("can't get data with column " + key); + LOG.error("can't get data with column {}", key); } sideVal.add(val); diff --git a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java index 01f43b246..41208c7f3 100644 --- a/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java +++ b/hbase/hbase-side/hbase-async-side/src/main/java/com/dtstack/flink/sql/side/hbase/rowkeydealer/RowKeyEqualModeDealer.java @@ -86,8 +86,7 @@ public void asyncGetData(String tableName, String rowKeyStr, CRow input, ResultF for(String key : colNames){ Object val = sideMap.get(key); if(val == null){ - System.out.println("can't get data with column " + key); - LOG.error("can't get data with column " + key); + LOG.error("can't get data with column {}", key); } sideVal.add(val); diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerFlinkPartition.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerFlinkPartition.java index e212d1f57..90dfe996b 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerFlinkPartition.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/CustomerFlinkPartition.java @@ -11,13 +11,13 @@ public class CustomerFlinkPartition extends FlinkKafkaPartitioner { public CustomerFlinkPartition() { } - + @Override public void open(int parallelInstanceId, int parallelInstances) { Preconditions.checkArgument(parallelInstanceId >= 0, "Id of this subtask cannot be negative."); Preconditions.checkArgument(parallelInstances > 0, "Number of subtasks must be larger than 0."); this.parallelInstanceId = parallelInstanceId; } - + @Override public int partition(T record, byte[] key, byte[] value, String targetTopic, int[] partitions) { Preconditions.checkArgument(partitions != null && partitions.length > 0, "Partitions of the target topic is empty."); if(key == null){ @@ -25,11 +25,11 @@ public int partition(T record, byte[] key, byte[] value, String targetTopic, int } 
return partitions[Math.abs(new String(key).hashCode()) % partitions.length]; } - + @Override public boolean equals(Object o) { return this == o || o instanceof CustomerFlinkPartition; } - + @Override public int hashCode() { return CustomerFlinkPartition.class.hashCode(); } diff --git a/launcher/src/main/java/com/dtstack/flink/sql/launcher/ClusterClientFactory.java b/launcher/src/main/java/com/dtstack/flink/sql/launcher/ClusterClientFactory.java index 7f4ded520..8f0090db5 100644 --- a/launcher/src/main/java/com/dtstack/flink/sql/launcher/ClusterClientFactory.java +++ b/launcher/src/main/java/com/dtstack/flink/sql/launcher/ClusterClientFactory.java @@ -21,11 +21,11 @@ import com.dtstack.flink.sql.enums.ClusterMode; import com.dtstack.flink.sql.option.Options; import com.dtstack.flink.sql.util.PluginUtil; +import com.esotericsoftware.minlog.Log; import org.apache.commons.io.Charsets; import org.apache.commons.lang.StringUtils; import org.apache.flink.client.program.ClusterClient; import org.apache.flink.client.program.MiniClusterClient; -import org.apache.flink.configuration.ConfigConstants; import org.apache.flink.configuration.Configuration; import org.apache.flink.configuration.GlobalConfiguration; import org.apache.flink.configuration.JobManagerOptions; @@ -109,7 +109,7 @@ public static ClusterClient createYarnSessionClient(Options launcherOptions) { applicationId = getYarnClusterApplicationId(yarnClient); } - System.out.println("applicationId=" + applicationId.toString()); + Log.info("applicationId={}", applicationId.toString()); if (StringUtils.isEmpty(applicationId.toString())) { throw new RuntimeException("No flink session found on yarn cluster."); diff --git a/launcher/src/main/java/com/dtstack/flink/sql/launcher/perjob/PerJobClusterClientBuilder.java b/launcher/src/main/java/com/dtstack/flink/sql/launcher/perjob/PerJobClusterClientBuilder.java index 3de1cdbc5..0e5089bc9 100644 --- a/launcher/src/main/java/com/dtstack/flink/sql/launcher/perjob/PerJobClusterClientBuilder.java +++ b/launcher/src/main/java/com/dtstack/flink/sql/launcher/perjob/PerJobClusterClientBuilder.java @@ -21,6 +21,7 @@ import com.dtstack.flink.sql.enums.EPluginLoadMode; import com.dtstack.flink.sql.launcher.YarnConfLoader; import com.dtstack.flink.sql.option.Options; +import com.esotericsoftware.minlog.Log; import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.cache.DistributedCache; import org.apache.flink.configuration.Configuration; @@ -79,7 +80,7 @@ public void init(String yarnConfDir, Configuration flinkConfig, Properties userC yarnClient.init(yarnConf); yarnClient.start(); - System.out.println("----init yarn success ----"); + Log.info("----init yarn success ----"); } public AbstractYarnClusterDescriptor createPerJobClusterDescriptor(String flinkJarPath, Options launcherOptions, JobGraph jobGraph) diff --git a/launcher/src/main/java/com/dtstack/flink/sql/launcher/perjob/PerJobSubmitter.java b/launcher/src/main/java/com/dtstack/flink/sql/launcher/perjob/PerJobSubmitter.java index 29cc4890a..ca2d5bdd7 100644 --- a/launcher/src/main/java/com/dtstack/flink/sql/launcher/perjob/PerJobSubmitter.java +++ b/launcher/src/main/java/com/dtstack/flink/sql/launcher/perjob/PerJobSubmitter.java @@ -73,7 +73,6 @@ public static String submit(Options launcherOptions, JobGraph jobGraph, Configur String flinkJobId = jobGraph.getJobID().toString(); String tips = String.format("deploy per_job with appId: %s, jobId: %s", applicationId, flinkJobId); - System.out.println(tips); LOG.info(tips); return 
applicationId; diff --git a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllSideInfo.java b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllSideInfo.java index 08dc56149..143017133 100644 --- a/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllSideInfo.java +++ b/rdb/rdb-side/src/main/java/com/dtstack/flink/sql/side/rdb/all/RdbAllSideInfo.java @@ -31,6 +31,8 @@ import org.apache.calcite.sql.SqlNode; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.Arrays; import java.util.List; @@ -47,6 +49,7 @@ public class RdbAllSideInfo extends BaseSideInfo { private static final long serialVersionUID = -5858335638589472159L; + private static final Logger LOG = LoggerFactory.getLogger(RdbAllSideInfo.class.getSimpleName()); public RdbAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { @@ -57,7 +60,7 @@ public RdbAllSideInfo(RowTypeInfo rowTypeInfo, JoinInfo joinInfo, List outFieldInfoList, AbstractSideTableInfo sideTableInfo) { super(rowTypeInfo, joinInfo, outFieldInfoList, sideTableInfo); @@ -74,7 +78,7 @@ public void buildEqualInfo(JoinInfo joinInfo, AbstractSideTableInfo sideTableInf sqlCondition = getSelectFromStatement(getTableName(rdbSideTableInfo), Arrays.asList(StringUtils.split(sideSelectFields, ",")), equalFieldList, sqlJoinCompareOperate, sideTableInfo.getPredicateInfoes()); - System.out.println("----------dimension sql query-----------\n" + sqlCondition); + LOG.info("----------dimension sql query-----------\n{}", sqlCondition); } diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/JDBCUpsertOutputFormat.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/JDBCUpsertOutputFormat.java index b01595bb9..e8b6dc8a4 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/JDBCUpsertOutputFormat.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/format/JDBCUpsertOutputFormat.java @@ -114,7 +114,6 @@ public void open(int taskNumber, int numTasks) throws IOException { if (StringUtils.equalsIgnoreCase(updateMode, EUpdateMode.APPEND.name()) || keyFields == null || keyFields.length == 0) { String insertSql = dialect.getInsertIntoStatement(schema, tableName, fieldNames, partitionFields); LOG.info("execute insert sql: {}", insertSql); - System.out.println("execute insert sql :" + insertSql); jdbcWriter = new AppendOnlyWriter(insertSql, fieldTypes, this); } else { jdbcWriter = AbstractUpsertWriter.create( diff --git a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java index 9ca0fd754..2c070b680 100644 --- a/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java +++ b/rdb/rdb-sink/src/main/java/com/dtstack/flink/sql/sink/rdb/writer/AbstractUpsertWriter.java @@ -70,11 +70,9 @@ public static AbstractUpsertWriter create( String deleteSql = dialect.getDeleteStatement(schema, tableName, keyFields); LOG.info("deleteSQL is :{}", deleteSql); - System.out.println("deleteSQL is :" + deleteSql); Optional upsertSql = dialect.getUpsertStatement(schema, tableName, fieldNames, keyFields, allReplace); LOG.info("execute UpsertStatement: {}", upsertSql.orElse("use UsingInsertUpdateStatement")); - System.out.println("execute 
UpsertStatement: " + upsertSql.orElse("use UsingInsertUpdateStatement")); return upsertSql.map((Function) sql -> new UpsertWriterUsingUpsertStatement( @@ -171,7 +169,6 @@ public void executeUpdate(Connection connection) throws SQLException { } connection.commit(); } catch (Exception e) { - System.out.println(e.getCause()); // deal pg error: current transaction is aborted, commands ignored until end of transaction block connection.rollback(); connection.commit(); From 7bcb8ac449c61656cb3eb2542c5d7a23a2a1fce6 Mon Sep 17 00:00:00 2001 From: maqi Date: Wed, 25 Mar 2020 15:25:09 +0800 Subject: [PATCH 37/47] remove unuse class --- .../flink/sql/side/redis/RedisAllReqRow.java | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java index f484c12b3..5d09c8f3d 100644 --- a/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java +++ b/redis5/redis5-side/redis-all-side/src/main/java/com/dtstack/flink/sql/side/redis/RedisAllReqRow.java @@ -36,21 +36,27 @@ import org.apache.flink.table.runtime.types.CRow; import org.apache.flink.types.Row; import org.apache.flink.util.Collector; -import org.apache.flink.api.java.typeutils.RowTypeInfo; import com.google.common.collect.Maps; -import org.apache.flink.table.runtime.types.CRow; -import org.apache.flink.types.Row; -import org.apache.flink.util.Collector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import redis.clients.jedis.*; +import redis.clients.jedis.HostAndPort; +import redis.clients.jedis.Jedis; +import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.JedisCommands; +import redis.clients.jedis.JedisPool; +import redis.clients.jedis.JedisSentinelPool; import java.io.Closeable; import java.io.IOException; import java.sql.SQLException; -import java.util.*; + +import java.util.Calendar; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; /** * @author yanxi */ From 2353b7fdcd52f1f96ccb9ae06480954222285bfb Mon Sep 17 00:00:00 2001 From: xuchao Date: Thu, 26 Mar 2020 10:41:47 +0800 Subject: [PATCH 38/47] =?UTF-8?q?=E6=B7=BB=E5=8A=A0sonar=20=E4=BB=A3?= =?UTF-8?q?=E7=A0=81=E8=B4=A8=E9=87=8F=E6=A3=80=E6=9F=A5=E9=85=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitlab-ci.yml | 10 ++++++++++ ci/sonar_notify.sh | 14 ++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 .gitlab-ci.yml create mode 100644 ci/sonar_notify.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 000000000..029098c87 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,10 @@ +build: + stage: test + script: + - mvn clean org.jacoco:jacoco-maven-plugin:0.7.8:prepare-agent package -Dmaven.test.failure.ignore=true -q + - mvn sonar:sonar -Dsonar.projectKey="dt-insight-engine/flinkStreamSQL" -Dsonar.host.url=http://172.16.100.198:9000 -Dsonar.jdbc.url=jdbc:postgresql://172.16.100.198:5432/sonar -Dsonar.java.binaries=target/sonar + - sh ci/sonar_notify.sh + only: + - 1.8_dev + tags: + - dt-insight-engine \ No newline at end of file diff --git a/ci/sonar_notify.sh b/ci/sonar_notify.sh new file mode 100644 index 000000000..41f8a3c0e --- /dev/null +++ 
b/ci/sonar_notify.sh @@ -0,0 +1,14 @@ +#!/bin/bash +#参考钉钉文档 https://open-doc.dingtalk.com/microapp/serverapi2/qf2nxq + sonarreport=$(curl -s http://172.16.100.198:8082/?projectname=dt-insight-engine/flinkStreamSQL) + curl -s "https://oapi.dingtalk.com/robot/send?access_token=71555061297a53d3ac922a6f4d94285d8e23bccdca0c00b4dc6df0a2d49da724" \ + -H "Content-Type: application/json" \ + -d "{ + \"msgtype\": \"markdown\", + \"markdown\": { + \"title\":\"sonar代码质量\", + \"text\": \"## sonar代码质量报告: \n +> [sonar地址](http://172.16.100.198:9000/dashboard?id=dt-insight-engine/flinkStreamSQL) \n +> ${sonarreport} \n\" + } + }" \ No newline at end of file From b169a349e4ab14461731c64104932005f7f52fea Mon Sep 17 00:00:00 2001 From: xuchao Date: Thu, 26 Mar 2020 10:52:33 +0800 Subject: [PATCH 39/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=B3=A8=E9=87=8A?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java b/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java index 462eed30b..a8d69597d 100644 --- a/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java +++ b/core/src/main/java/com/dtstack/flink/sql/util/DateUtil.java @@ -37,7 +37,7 @@ /** * - * Reason: TODO ADD REASON(可选) + * 日期工具 * Date: 2017年03月10日 下午1:16:37 * Company: www.dtstack.com * @author sishu.yss From 45013fab4abb379a6043a7bc02a877b966c33b8b Mon Sep 17 00:00:00 2001 From: xuchao Date: Thu, 26 Mar 2020 11:01:23 +0800 Subject: [PATCH 40/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9sonar=20=E9=85=8D?= =?UTF-8?q?=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 029098c87..39e0d4746 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -5,6 +5,6 @@ build: - mvn sonar:sonar -Dsonar.projectKey="dt-insight-engine/flinkStreamSQL" -Dsonar.host.url=http://172.16.100.198:9000 -Dsonar.jdbc.url=jdbc:postgresql://172.16.100.198:5432/sonar -Dsonar.java.binaries=target/sonar - sh ci/sonar_notify.sh only: - - 1.8_dev + - v1.8.0_dev tags: - dt-insight-engine \ No newline at end of file From 2d9a65efe53461e9c772284bd79c414565891114 Mon Sep 17 00:00:00 2001 From: xuchao Date: Fri, 27 Mar 2020 13:55:04 +0800 Subject: [PATCH 41/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9sonar=20=E9=85=8D?= =?UTF-8?q?=E7=BD=AE=EF=BC=8C=E6=B7=BB=E5=8A=A0login-token?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 39e0d4746..242b309a9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,7 +2,7 @@ build: stage: test script: - mvn clean org.jacoco:jacoco-maven-plugin:0.7.8:prepare-agent package -Dmaven.test.failure.ignore=true -q - - mvn sonar:sonar -Dsonar.projectKey="dt-insight-engine/flinkStreamSQL" -Dsonar.host.url=http://172.16.100.198:9000 -Dsonar.jdbc.url=jdbc:postgresql://172.16.100.198:5432/sonar -Dsonar.java.binaries=target/sonar + - mvn sonar:sonar -Dsonar.projectKey="dt-insight-engine/flinkStreamSQL" -Dsonar.login= 11974c5e9a29625efa09fdc3c3fdc031efb1aab1 -Dsonar.host.url=http://172.16.100.198:9000 -Dsonar.jdbc.url=jdbc:postgresql://172.16.100.198:5432/sonar -Dsonar.java.binaries=target/sonar - sh ci/sonar_notify.sh only: - v1.8.0_dev From 
0ae4bb331c6d081dd8ced9c8f77fbfa89223f98b Mon Sep 17 00:00:00 2001 From: xuchao Date: Fri, 27 Mar 2020 15:04:06 +0800 Subject: [PATCH 42/47] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=B5=8B=E8=AF=95?= =?UTF-8?q?=E7=B1=BB=E5=90=8D=E7=A7=B0=E9=94=99=E8=AF=AFpackage?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../test/java/com/dtstack/flinkx/AppTest.java | 58 ------------------- .../test/java/com/dtstack/flinkx/AppTest.java | 58 ------------------- .../test/java/com/dtstack/flinkx/AppTest.java | 58 ------------------- 3 files changed, 174 deletions(-) delete mode 100644 cassandra/cassandra-sink/src/test/java/com/dtstack/flinkx/AppTest.java delete mode 100644 mongo/mongo-sink/src/test/java/com/dtstack/flinkx/AppTest.java delete mode 100644 mysql/mysql-sink/src/test/java/com/dtstack/flinkx/AppTest.java diff --git a/cassandra/cassandra-sink/src/test/java/com/dtstack/flinkx/AppTest.java b/cassandra/cassandra-sink/src/test/java/com/dtstack/flinkx/AppTest.java deleted file mode 100644 index 33a0233ac..000000000 --- a/cassandra/cassandra-sink/src/test/java/com/dtstack/flinkx/AppTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.dtstack.flinkx; - -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -/** - * Unit test for simple App. - */ -public class AppTest - extends TestCase -{ - /** - * Create the test case - * - * @param testName name of the test case - */ - public AppTest( String testName ) - { - super( testName ); - } - - /** - * @return the suite of tests being tested - */ - public static Test suite() - { - return new TestSuite( AppTest.class ); - } - - /** - * Rigourous Test :-) - */ - public void testApp() - { - assertTrue( true ); - } -} diff --git a/mongo/mongo-sink/src/test/java/com/dtstack/flinkx/AppTest.java b/mongo/mongo-sink/src/test/java/com/dtstack/flinkx/AppTest.java deleted file mode 100644 index 33a0233ac..000000000 --- a/mongo/mongo-sink/src/test/java/com/dtstack/flinkx/AppTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.dtstack.flinkx; - -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -/** - * Unit test for simple App. - */ -public class AppTest - extends TestCase -{ - /** - * Create the test case - * - * @param testName name of the test case - */ - public AppTest( String testName ) - { - super( testName ); - } - - /** - * @return the suite of tests being tested - */ - public static Test suite() - { - return new TestSuite( AppTest.class ); - } - - /** - * Rigourous Test :-) - */ - public void testApp() - { - assertTrue( true ); - } -} diff --git a/mysql/mysql-sink/src/test/java/com/dtstack/flinkx/AppTest.java b/mysql/mysql-sink/src/test/java/com/dtstack/flinkx/AppTest.java deleted file mode 100644 index 33a0233ac..000000000 --- a/mysql/mysql-sink/src/test/java/com/dtstack/flinkx/AppTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.dtstack.flinkx; - -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -/** - * Unit test for simple App. 
- */ -public class AppTest - extends TestCase -{ - /** - * Create the test case - * - * @param testName name of the test case - */ - public AppTest( String testName ) - { - super( testName ); - } - - /** - * @return the suite of tests being tested - */ - public static Test suite() - { - return new TestSuite( AppTest.class ); - } - - /** - * Rigourous Test :-) - */ - public void testApp() - { - assertTrue( true ); - } -} From eafed59c1ab647d636d0d4113a12211cdc98aa55 Mon Sep 17 00:00:00 2001 From: xuchao Date: Fri, 27 Mar 2020 17:00:21 +0800 Subject: [PATCH 43/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9sonar=20login=20?= =?UTF-8?q?=E9=85=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 242b309a9..981a47148 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,7 +2,7 @@ build: stage: test script: - mvn clean org.jacoco:jacoco-maven-plugin:0.7.8:prepare-agent package -Dmaven.test.failure.ignore=true -q - - mvn sonar:sonar -Dsonar.projectKey="dt-insight-engine/flinkStreamSQL" -Dsonar.login= 11974c5e9a29625efa09fdc3c3fdc031efb1aab1 -Dsonar.host.url=http://172.16.100.198:9000 -Dsonar.jdbc.url=jdbc:postgresql://172.16.100.198:5432/sonar -Dsonar.java.binaries=target/sonar + - mvn sonar:sonar -Dsonar.projectKey="dt-insight-engine/flinkStreamSQL" -Dsonar.login=11974c5e9a29625efa09fdc3c3fdc031efb1aab1 -Dsonar.host.url=http://172.16.100.198:9000 -Dsonar.jdbc.url=jdbc:postgresql://172.16.100.198:5432/sonar -Dsonar.java.binaries=target/sonar - sh ci/sonar_notify.sh only: - v1.8.0_dev From 59b996b53d3b164565cbdd683a343c8915c1e52c Mon Sep 17 00:00:00 2001 From: xuchao Date: Fri, 27 Mar 2020 17:35:53 +0800 Subject: [PATCH 44/47] =?UTF-8?q?=E4=BF=AE=E6=94=B9sonar=20=E5=88=86?= =?UTF-8?q?=E6=94=AF=20=E9=85=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 981a47148..2952551ff 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,7 +2,7 @@ build: stage: test script: - mvn clean org.jacoco:jacoco-maven-plugin:0.7.8:prepare-agent package -Dmaven.test.failure.ignore=true -q - - mvn sonar:sonar -Dsonar.projectKey="dt-insight-engine/flinkStreamSQL" -Dsonar.login=11974c5e9a29625efa09fdc3c3fdc031efb1aab1 -Dsonar.host.url=http://172.16.100.198:9000 -Dsonar.jdbc.url=jdbc:postgresql://172.16.100.198:5432/sonar -Dsonar.java.binaries=target/sonar + - mvn sonar:sonar -Dsonar.projectKey="dt-insight-engine/flinkStreamSQL" -Dsonar.branch.name="v1.8.0_dev" -Dsonar.login=11974c5e9a29625efa09fdc3c3fdc031efb1aab1 -Dsonar.host.url=http://172.16.100.198:9000 -Dsonar.jdbc.url=jdbc:postgresql://172.16.100.198:5432/sonar -Dsonar.java.binaries=target/sonar - sh ci/sonar_notify.sh only: - v1.8.0_dev From b5c6b4a17282b4f20f9c941114b29ffd45807d5d Mon Sep 17 00:00:00 2001 From: xuchao Date: Mon, 30 Mar 2020 12:02:30 +0800 Subject: [PATCH 45/47] =?UTF-8?q?=E5=8E=BB=E9=99=A4=E6=97=A0=E7=94=A8?= =?UTF-8?q?=E7=9A=84=E5=BC=95=E7=94=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../flink/sql/sink/kafka/AbstractKafkaProducerFactory.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java 
b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java index 9dcaf222b..9958a2544 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaProducerFactory.java @@ -26,12 +26,9 @@ import org.apache.commons.lang3.StringUtils; import org.apache.flink.api.common.serialization.SerializationSchema; import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.formats.avro.AvroRowSerializationSchema; -import org.apache.flink.formats.csv.CsvRowSerializationSchema; import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; import org.apache.flink.table.runtime.types.CRow; -import org.apache.flink.types.Row; import java.util.Optional; import java.util.Properties; From ba3680f3ef4142826d7314b958689ddb5417f2cf Mon Sep 17 00:00:00 2001 From: maqi Date: Mon, 30 Mar 2020 13:40:52 +0800 Subject: [PATCH 46/47] csv sink default delimiter --- docs/kafkaSink.md | 2 +- .../com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/kafkaSink.md b/docs/kafkaSink.md index 3c6eb1dc6..5d7c7b2a7 100644 --- a/docs/kafkaSink.md +++ b/docs/kafkaSink.md @@ -41,7 +41,7 @@ CREATE TABLE tableName( |partitionKeys | 用来分区的字段|否|| |updateMode | 回溯流数据下发模式,append,upsert.upsert模式下会将是否为回溯信息以字段形式进行下发。|否|append| |sinkdatatype | 写入kafka数据格式,json,avro,csv|否|json| -|fieldDelimiter | csv数据分隔符|否| \ | +|fieldDelimiter | csv数据分隔符|否| , | **kafka相关参数可以自定义,使用kafka.开头即可。** diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java index 6740ea867..4ad8947a8 100644 --- a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/table/KafkaSinkParser.java @@ -49,7 +49,7 @@ public AbstractTableInfo getTableInfo(String tableName, String fieldsInfo, Map Date: Wed, 1 Apr 2020 14:59:25 +0800 Subject: [PATCH 47/47] kafkaSink opt --- .../sql/sink/kafka/AbstractKafkaSink.java | 148 ++++++++++++++++++ .../flink/sql/sink/kafka/KafkaSink.java | 120 ++------------ .../flink/sql/sink/kafka/KafkaSink.java | 125 ++------------- .../flink/sql/sink/kafka/KafkaSink.java | 125 ++------------- .../flink/sql/sink/kafka/KafkaSink.java | 123 ++------------- 5 files changed, 186 insertions(+), 455 deletions(-) create mode 100644 kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaSink.java diff --git a/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaSink.java b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaSink.java new file mode 100644 index 000000000..7234216a7 --- /dev/null +++ b/kafka-base/kafka-base-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/AbstractKafkaSink.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.dtstack.flink.sql.sink.kafka; + +import com.dtstack.flink.sql.sink.IStreamSinkGener; +import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; +import org.apache.commons.lang3.StringUtils; +import org.apache.flink.api.common.typeinfo.TypeInformation; +import org.apache.flink.api.java.tuple.Tuple2; +import org.apache.flink.api.java.typeutils.RowTypeInfo; +import org.apache.flink.api.java.typeutils.TupleTypeInfo; +import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.streaming.api.functions.sink.SinkFunction; +import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; +import org.apache.flink.table.api.TableSchema; +import org.apache.flink.table.runtime.types.CRow; +import org.apache.flink.table.runtime.types.CRowTypeInfo; +import org.apache.flink.table.sinks.RetractStreamTableSink; +import org.apache.flink.table.sinks.TableSink; +import org.apache.flink.types.Row; +import org.apache.flink.util.Preconditions; +import org.apache.kafka.clients.consumer.ConsumerConfig; + +import java.util.Optional; +import java.util.Properties; +import java.util.stream.IntStream; + +/** + * Date: 2020/4/1 + * Company: www.dtstack.com + * @author maqi + */ +public abstract class AbstractKafkaSink implements RetractStreamTableSink, IStreamSinkGener { + + public static final String SINK_OPERATOR_NAME_TPL = "${topic}_${table}"; + + protected String[] fieldNames; + protected TypeInformation[] fieldTypes; + + protected String[] partitionKeys; + protected String sinkOperatorName; + protected Properties properties; + protected int parallelism; + protected String topic; + protected String tableName; + + protected TableSchema schema; + protected SinkFunction kafkaProducer; + + + protected Optional> partitioner; + + protected Properties getKafkaProperties(KafkaSinkTableInfo KafkaSinkTableInfo) { + Properties props = new Properties(); + props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaSinkTableInfo.getBootstrapServers()); + + for (String key : KafkaSinkTableInfo.getKafkaParamKeys()) { + props.setProperty(key, KafkaSinkTableInfo.getKafkaParam(key)); + } + return props; + } + + protected TypeInformation[] getTypeInformations(KafkaSinkTableInfo kafka11SinkTableInfo) { + Class[] fieldClasses = kafka11SinkTableInfo.getFieldClasses(); + TypeInformation[] types = IntStream.range(0, fieldClasses.length) + .mapToObj(i -> TypeInformation.of(fieldClasses[i])) + .toArray(TypeInformation[]::new); + return types; + } + + + protected TableSchema buildTableSchema(String[] fieldNames, TypeInformation[] fieldTypes) { + Preconditions.checkArgument(fieldNames.length == fieldTypes.length, "fieldNames length must equals fieldTypes length !"); + + TableSchema.Builder builder = TableSchema.builder(); + IntStream.range(0, fieldTypes.length) + .forEach(i -> builder.field(fieldNames[i], fieldTypes[i])); + + return builder.build(); + } + + + @Override + public void 
emitDataStream(DataStream> dataStream) { + DataStream mapDataStream = dataStream + .map((Tuple2 record) -> new CRow(record.f1, record.f0)) + .returns(getRowTypeInfo()) + .setParallelism(parallelism); + + mapDataStream.addSink(kafkaProducer).name(sinkOperatorName); + } + + public CRowTypeInfo getRowTypeInfo() { + return new CRowTypeInfo(new RowTypeInfo(fieldTypes, fieldNames)); + } + + protected String[] getPartitionKeys(KafkaSinkTableInfo kafkaSinkTableInfo) { + if (StringUtils.isNotBlank(kafkaSinkTableInfo.getPartitionKeys())) { + return StringUtils.split(kafkaSinkTableInfo.getPartitionKeys(), ','); + } + return null; + } + + @Override + public TupleTypeInfo> getOutputType() { + return new TupleTypeInfo(org.apache.flink.table.api.Types.BOOLEAN(), new RowTypeInfo(fieldTypes, fieldNames)); + } + + @Override + public String[] getFieldNames() { + return fieldNames; + } + + @Override + public TypeInformation[] getFieldTypes() { + return fieldTypes; + } + + @Override + public TableSink> configure(String[] fieldNames, TypeInformation[] fieldTypes) { + this.fieldNames = fieldNames; + this.fieldTypes = fieldTypes; + return this; + } + + @Override + public TypeInformation getRecordType() { + return new RowTypeInfo(fieldTypes, fieldNames); + } + + +} diff --git a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index 71e938ba5..632bb720e 100644 --- a/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka/kafka-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -18,25 +18,8 @@ package com.dtstack.flink.sql.sink.kafka; -import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; import com.dtstack.flink.sql.table.AbstractTargetTableInfo; -import org.apache.commons.lang3.StringUtils; -import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.java.tuple.Tuple2; -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.api.java.typeutils.TupleTypeInfo; -import org.apache.flink.streaming.api.datastream.DataStream; -import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer; -import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner; -import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.runtime.types.CRow; -import org.apache.flink.table.runtime.types.CRowTypeInfo; -import org.apache.flink.table.sinks.RetractStreamTableSink; -import org.apache.flink.table.sinks.TableSink; -import org.apache.flink.table.utils.TableConnectorUtils; -import org.apache.flink.types.Row; import java.util.Optional; import java.util.Properties; @@ -46,108 +29,23 @@ * @create: 2019-11-05 11:45 * @description: **/ -public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener { - - protected String[] fieldNames; - - protected TypeInformation[] fieldTypes; - - protected String topic; - - protected int parallelism; - - protected Properties properties; - - protected FlinkKafkaProducer flinkKafkaProducer; - protected CRowTypeInfo typeInformation; - - - /** The schema of the table. */ - private TableSchema schema; - - /** Partitioner to select Kafka partition for each item. 
*/ - protected Optional> partitioner; - - private String[] partitionKeys; - +public class KafkaSink extends AbstractKafkaSink { @Override public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { KafkaSinkTableInfo kafkaSinkTableInfo = (KafkaSinkTableInfo) targetTableInfo; - this.topic = kafkaSinkTableInfo.getTopic(); - - properties = new Properties(); - properties.setProperty("bootstrap.servers", kafkaSinkTableInfo.getBootstrapServers()); - for (String key : kafkaSinkTableInfo.getKafkaParamKeys()) { - properties.setProperty(key, kafkaSinkTableInfo.getKafkaParam(key)); - } + Properties kafkaProperties = getKafkaProperties(kafkaSinkTableInfo); + this.tableName = kafkaSinkTableInfo.getName(); + this.topic = kafkaSinkTableInfo.getTopic(); this.partitioner = Optional.of(new CustomerFlinkPartition<>()); this.partitionKeys = getPartitionKeys(kafkaSinkTableInfo); this.fieldNames = kafkaSinkTableInfo.getFields(); - TypeInformation[] types = new TypeInformation[kafkaSinkTableInfo.getFields().length]; - for (int i = 0; i < kafkaSinkTableInfo.getFieldClasses().length; i++) { - types[i] = TypeInformation.of(kafkaSinkTableInfo.getFieldClasses()[i]); - } - this.fieldTypes = types; - - TableSchema.Builder schemaBuilder = TableSchema.builder(); - for (int i=0;i) new KafkaProducerFactory() - .createKafkaProducer(kafkaSinkTableInfo, typeInformation, properties, partitioner, partitionKeys); - return this; - } - - @Override - public TypeInformation getRecordType() { - return new RowTypeInfo(fieldTypes, fieldNames); - } - - @Override - public void emitDataStream(DataStream> dataStream) { - DataStream mapDataStream = dataStream - .map((Tuple2 record) -> new CRow(record.f1, record.f0)) - .returns(typeInformation) - .setParallelism(parallelism); - - mapDataStream.addSink(flinkKafkaProducer).name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames())); - } - - @Override - public TupleTypeInfo> getOutputType() { - return new TupleTypeInfo(org.apache.flink.table.api.Types.BOOLEAN(), new RowTypeInfo(fieldTypes, fieldNames)); - } - - @Override - public String[] getFieldNames() { - return fieldNames; - } - - @Override - public TypeInformation[] getFieldTypes() { - return fieldTypes; - } - - @Override - public TableSink> configure(String[] fieldNames, TypeInformation[] fieldTypes) { - this.fieldNames = fieldNames; - this.fieldTypes = fieldTypes; + this.fieldTypes = getTypeInformations(kafkaSinkTableInfo); + this.schema = buildTableSchema(fieldNames, fieldTypes); + this.parallelism = kafkaSinkTableInfo.getParallelism(); + this.sinkOperatorName = SINK_OPERATOR_NAME_TPL.replace("${topic}", topic).replace("${table}", tableName); + this.kafkaProducer = new KafkaProducerFactory().createKafkaProducer(kafkaSinkTableInfo, getRowTypeInfo(), kafkaProperties, partitioner, partitionKeys); return this; } - private String[] getPartitionKeys(KafkaSinkTableInfo kafkaSinkTableInfo){ - if(StringUtils.isNotBlank(kafkaSinkTableInfo.getPartitionKeys())){ - return StringUtils.split(kafkaSinkTableInfo.getPartitionKeys(), ','); - } - return null; - } } diff --git a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index e6dbdf3d3..d22be3d59 100644 --- a/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka09/kafka09-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -18,26 +18,8 @@ package com.dtstack.flink.sql.sink.kafka; -import 
com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; import com.dtstack.flink.sql.table.AbstractTargetTableInfo; -import org.apache.commons.lang3.StringUtils; -import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.java.tuple.Tuple2; -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.api.java.typeutils.TupleTypeInfo; -import org.apache.flink.streaming.api.datastream.DataStream; -import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; -import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer09; -import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner; -import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.runtime.types.CRow; -import org.apache.flink.table.runtime.types.CRowTypeInfo; -import org.apache.flink.table.sinks.RetractStreamTableSink; -import org.apache.flink.table.sinks.TableSink; -import org.apache.flink.table.utils.TableConnectorUtils; -import org.apache.flink.types.Row; import java.util.Optional; import java.util.Properties; @@ -49,111 +31,22 @@ * @author DocLi * @modifyer maqi */ -public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener { - - protected String[] fieldNames; - - protected TypeInformation[] fieldTypes; - - protected String topic; - - protected Properties properties; - - protected FlinkKafkaProducer09 kafkaProducer09; - protected CRowTypeInfo typeInformation; - - /** The schema of the table. */ - private TableSchema schema; - - /** Partitioner to select Kafka partition for each item. */ - protected Optional> partitioner; - - private String[] partitionKeys; - - protected int parallelism; - - - +public class KafkaSink extends AbstractKafkaSink{ @Override public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { KafkaSinkTableInfo kafka09SinkTableInfo = (KafkaSinkTableInfo) targetTableInfo; - this.topic = kafka09SinkTableInfo.getTopic(); - - properties = new Properties(); - properties.setProperty("bootstrap.servers", kafka09SinkTableInfo.getBootstrapServers()); - for (String key : kafka09SinkTableInfo.getKafkaParamKeys()) { - properties.setProperty(key, kafka09SinkTableInfo.getKafkaParam(key)); - } + Properties kafkaProperties = getKafkaProperties(kafka09SinkTableInfo); + this.tableName = kafka09SinkTableInfo.getName(); + this.topic = kafka09SinkTableInfo.getTopic(); this.partitioner = Optional.of(new CustomerFlinkPartition<>()); this.partitionKeys = getPartitionKeys(kafka09SinkTableInfo); this.fieldNames = kafka09SinkTableInfo.getFields(); - TypeInformation[] types = new TypeInformation[kafka09SinkTableInfo.getFields().length]; - for (int i = 0; i < kafka09SinkTableInfo.getFieldClasses().length; i++) { - types[i] = TypeInformation.of(kafka09SinkTableInfo.getFieldClasses()[i]); - } - this.fieldTypes = types; - - TableSchema.Builder schemaBuilder = TableSchema.builder(); - for (int i=0;i) new KafkaProducer09Factory() - .createKafkaProducer(kafka09SinkTableInfo, typeInformation, properties, partitioner, partitionKeys); + this.fieldTypes = getTypeInformations(kafka09SinkTableInfo); + this.schema = buildTableSchema(fieldNames, fieldTypes); + this.parallelism = kafka09SinkTableInfo.getParallelism(); + this.sinkOperatorName = SINK_OPERATOR_NAME_TPL.replace("${topic}", topic).replace("${table}", tableName); + this.kafkaProducer = new 
KafkaProducer09Factory().createKafkaProducer(kafka09SinkTableInfo, getRowTypeInfo(), kafkaProperties, partitioner, partitionKeys); return this; } - - @Override - public TypeInformation getRecordType() { - return new RowTypeInfo(fieldTypes, fieldNames); - } - - @Override - public void emitDataStream(DataStream> dataStream) { - DataStream mapDataStream = dataStream - .map((Tuple2 record) -> new CRow(record.f1, record.f0)) - .returns(typeInformation) - .setParallelism(parallelism); - - mapDataStream.addSink(kafkaProducer09) - .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames())); - } - - @Override - public TupleTypeInfo> getOutputType() { - return new TupleTypeInfo(org.apache.flink.table.api.Types.BOOLEAN(), new RowTypeInfo(fieldTypes, fieldNames)); - } - - @Override - public String[] getFieldNames() { - return fieldNames; - } - - @Override - public TypeInformation[] getFieldTypes() { - return fieldTypes; - } - - @Override - public TableSink> configure(String[] fieldNames, TypeInformation[] fieldTypes) { - this.fieldNames = fieldNames; - this.fieldTypes = fieldTypes; - return this; - } - - private String[] getPartitionKeys(KafkaSinkTableInfo kafkaSinkTableInfo){ - if(StringUtils.isNotBlank(kafkaSinkTableInfo.getPartitionKeys())){ - return StringUtils.split(kafkaSinkTableInfo.getPartitionKeys(), ','); - } - return null; - } - } diff --git a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index ac5a11810..eea78e121 100644 --- a/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka10/kafka10-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -18,23 +18,8 @@ package com.dtstack.flink.sql.sink.kafka; -import com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; import com.dtstack.flink.sql.table.AbstractTargetTableInfo; -import org.apache.commons.lang3.StringUtils; -import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.java.tuple.Tuple2; -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.api.java.typeutils.TupleTypeInfo; -import org.apache.flink.streaming.api.datastream.DataStream; -import org.apache.flink.streaming.api.functions.sink.RichSinkFunction; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.runtime.types.CRow; -import org.apache.flink.table.runtime.types.CRowTypeInfo; -import org.apache.flink.table.sinks.RetractStreamTableSink; -import org.apache.flink.table.sinks.TableSink; -import org.apache.flink.table.utils.TableConnectorUtils; -import org.apache.flink.types.Row; import java.util.Optional; import java.util.Properties; @@ -48,110 +33,22 @@ * @modifyer maqi * */ -public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener { - - - protected String[] fieldNames; - - protected TypeInformation[] fieldTypes; - - protected String topic; - - protected Properties properties; - - protected int parallelism; - - protected KafkaSinkTableInfo kafka10SinkTableInfo; - - protected RichSinkFunction kafkaProducer010; - protected CRowTypeInfo typeInformation; - - /** The schema of the table. 
*/ - private TableSchema schema; - - private String[] partitionKeys; - +public class KafkaSink extends AbstractKafkaSink { @Override public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { - this.kafka10SinkTableInfo = (KafkaSinkTableInfo) targetTableInfo; - this.topic = kafka10SinkTableInfo.getTopic(); - - properties = new Properties(); - properties.setProperty("bootstrap.servers", kafka10SinkTableInfo.getBootstrapServers()); - - for (String key : kafka10SinkTableInfo.getKafkaParamKeys()) { - properties.setProperty(key, kafka10SinkTableInfo.getKafkaParam(key)); - } + KafkaSinkTableInfo kafka10SinkTableInfo = (KafkaSinkTableInfo) targetTableInfo; + Properties kafkaProperties = getKafkaProperties(kafka10SinkTableInfo); + this.tableName = kafka10SinkTableInfo.getName(); + this.topic = kafka10SinkTableInfo.getTopic(); + this.partitioner = Optional.of(new CustomerFlinkPartition<>()); this.partitionKeys = getPartitionKeys(kafka10SinkTableInfo); this.fieldNames = kafka10SinkTableInfo.getFields(); - TypeInformation[] types = new TypeInformation[kafka10SinkTableInfo.getFields().length]; - for (int i = 0; i < kafka10SinkTableInfo.getFieldClasses().length; i++) { - types[i] = TypeInformation.of(kafka10SinkTableInfo.getFieldClasses()[i]); - } - this.fieldTypes = types; - - - TableSchema.Builder schemaBuilder = TableSchema.builder(); - for (int i=0;i()), partitionKeys); - - return this; - } - - @Override - public TypeInformation getRecordType() { - return new RowTypeInfo(fieldTypes, fieldNames); - } - - @Override - public void emitDataStream(DataStream> dataStream) { - DataStream mapDataStream = dataStream - .map((Tuple2 record) -> new CRow(record.f1, record.f0)) - .returns(typeInformation) - .setParallelism(parallelism); - - mapDataStream.addSink(kafkaProducer010).name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames())); - } - - @Override - public TupleTypeInfo> getOutputType() { - return new TupleTypeInfo(org.apache.flink.table.api.Types.BOOLEAN(), new RowTypeInfo(fieldTypes, fieldNames)); - } - - @Override - public String[] getFieldNames() { - return fieldNames; - } - - @Override - public TypeInformation[] getFieldTypes() { - return fieldTypes; - } - - @Override - public TableSink> configure(String[] fieldNames, TypeInformation[] fieldTypes) { - this.fieldNames = fieldNames; - this.fieldTypes = fieldTypes; + this.fieldTypes = getTypeInformations(kafka10SinkTableInfo); + this.schema = buildTableSchema(fieldNames, fieldTypes); + this.parallelism = kafka10SinkTableInfo.getParallelism(); + this.sinkOperatorName = SINK_OPERATOR_NAME_TPL.replace("${topic}", topic).replace("${table}", tableName); + this.kafkaProducer = new KafkaProducer010Factory().createKafkaProducer(kafka10SinkTableInfo, getRowTypeInfo(), kafkaProperties, partitioner, partitionKeys); return this; } - private String[] getPartitionKeys(KafkaSinkTableInfo kafkaSinkTableInfo){ - if(StringUtils.isNotBlank(kafkaSinkTableInfo.getPartitionKeys())){ - return StringUtils.split(kafkaSinkTableInfo.getPartitionKeys(), ','); - } - return null; - } - } diff --git a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java index 835941ca3..ea45280c7 100644 --- a/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java +++ b/kafka11/kafka11-sink/src/main/java/com/dtstack/flink/sql/sink/kafka/KafkaSink.java @@ -18,25 +18,8 @@ package com.dtstack.flink.sql.sink.kafka; -import 
com.dtstack.flink.sql.sink.IStreamSinkGener; import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo; import com.dtstack.flink.sql.table.AbstractTargetTableInfo; -import org.apache.commons.lang3.StringUtils; -import org.apache.flink.api.common.typeinfo.TypeInformation; -import org.apache.flink.api.java.tuple.Tuple2; -import org.apache.flink.api.java.typeutils.RowTypeInfo; -import org.apache.flink.api.java.typeutils.TupleTypeInfo; -import org.apache.flink.streaming.api.datastream.DataStream; -import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011; -import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner; -import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; -import org.apache.flink.table.api.TableSchema; -import org.apache.flink.table.runtime.types.CRow; -import org.apache.flink.table.runtime.types.CRowTypeInfo; -import org.apache.flink.table.sinks.RetractStreamTableSink; -import org.apache.flink.table.sinks.TableSink; -import org.apache.flink.table.utils.TableConnectorUtils; -import org.apache.flink.types.Row; import java.util.Optional; import java.util.Properties; @@ -51,110 +34,22 @@ * @modifyer maqi * */ -public class KafkaSink implements RetractStreamTableSink, IStreamSinkGener { - - protected String[] fieldNames; - - protected TypeInformation[] fieldTypes; - - protected String topic; - - protected int parallelism; - - protected Properties properties; - - protected FlinkKafkaProducer011 kafkaProducer011; - protected CRowTypeInfo typeInformation; - - - /** The schema of the table. */ - private TableSchema schema; - - /** Partitioner to select Kafka partition for each item. */ - protected Optional> partitioner; - private String[] partitionKeys; - - +public class KafkaSink extends AbstractKafkaSink { @Override public KafkaSink genStreamSink(AbstractTargetTableInfo targetTableInfo) { KafkaSinkTableInfo kafka11SinkTableInfo = (KafkaSinkTableInfo) targetTableInfo; - this.topic = kafka11SinkTableInfo.getTopic(); - - properties = new Properties(); - properties.setProperty("bootstrap.servers", kafka11SinkTableInfo.getBootstrapServers()); - for (String key : kafka11SinkTableInfo.getKafkaParamKeys()) { - properties.setProperty(key, kafka11SinkTableInfo.getKafkaParam(key)); - } + Properties kafkaProperties = getKafkaProperties(kafka11SinkTableInfo); + this.tableName = kafka11SinkTableInfo.getName(); + this.topic = kafka11SinkTableInfo.getTopic(); this.partitioner = Optional.of(new CustomerFlinkPartition<>()); this.partitionKeys = getPartitionKeys(kafka11SinkTableInfo); this.fieldNames = kafka11SinkTableInfo.getFields(); - TypeInformation[] types = new TypeInformation[kafka11SinkTableInfo.getFields().length]; - for (int i = 0; i < kafka11SinkTableInfo.getFieldClasses().length; i++) { - types[i] = TypeInformation.of(kafka11SinkTableInfo.getFieldClasses()[i]); - } - this.fieldTypes = types; - - TableSchema.Builder schemaBuilder = TableSchema.builder(); - for (int i=0;i) new KafkaProducer011Factory() - .createKafkaProducer(kafka11SinkTableInfo, typeInformation, properties, partitioner, partitionKeys); + this.fieldTypes = getTypeInformations(kafka11SinkTableInfo); + this.schema = buildTableSchema(fieldNames, fieldTypes); + this.parallelism = kafka11SinkTableInfo.getParallelism(); + this.sinkOperatorName = SINK_OPERATOR_NAME_TPL.replace("${topic}", topic).replace("${table}", tableName); + this.kafkaProducer = new KafkaProducer011Factory().createKafkaProducer(kafka11SinkTableInfo, getRowTypeInfo(), kafkaProperties, 
partitioner, partitionKeys);
         return this;
     }
-
-    @Override
-    public TypeInformation<Row> getRecordType() {
-        return new RowTypeInfo(fieldTypes, fieldNames);
-    }
-
-    @Override
-    public void emitDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
-
-        DataStream<CRow> mapDataStream = dataStream
-                .map((Tuple2<Boolean, Row> record) -> new CRow(record.f1, record.f0))
-                .returns(typeInformation)
-                .setParallelism(parallelism);
-
-        mapDataStream.addSink(kafkaProducer011).name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
-    }
-
-    @Override
-    public TupleTypeInfo<Tuple2<Boolean, Row>> getOutputType() {
-        return new TupleTypeInfo(org.apache.flink.table.api.Types.BOOLEAN(), new RowTypeInfo(fieldTypes, fieldNames));
-    }
-
-    @Override
-    public String[] getFieldNames() {
-        return fieldNames;
-    }
-
-    @Override
-    public TypeInformation[] getFieldTypes() {
-        return fieldTypes;
-    }
-
-    @Override
-    public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation[] fieldTypes) {
-        this.fieldNames = fieldNames;
-        this.fieldTypes = fieldTypes;
-        return this;
-    }
-
-    private String[] getPartitionKeys(KafkaSinkTableInfo kafkaSinkTableInfo) {
-        if (StringUtils.isNotBlank(kafkaSinkTableInfo.getPartitionKeys())) {
-            return StringUtils.split(kafkaSinkTableInfo.getPartitionKeys(), ',');
-        }
-        return null;
-    }
-
-}
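
[editor's note] The four version-specific KafkaSink classes above (kafka, kafka09, kafka10, kafka11) are all reduced to setting shared fields and delegating to an AbstractKafkaSink base class; only the tail of that new base class (emitDataStream, getRowTypeInfo, getPartitionKeys, the TableSink getters and configure) is visible at the start of this hunk. The sketch below is a hedged reconstruction of the parts that fall outside the excerpt: the field and constant names, getKafkaProperties, getTypeInformations and buildTableSchema are taken from how the subclasses use them, but the concrete field types, the SINK_OPERATOR_NAME_TPL value and the helper bodies are assumptions inferred from the per-version code this patch removes, not the actual file from the series.

package com.dtstack.flink.sql.sink.kafka;

import com.dtstack.flink.sql.sink.IStreamSinkGener;
import com.dtstack.flink.sql.sink.kafka.table.KafkaSinkTableInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.runtime.types.CRow;
import org.apache.flink.table.sinks.RetractStreamTableSink;
import org.apache.flink.types.Row;

import java.util.Optional;
import java.util.Properties;

/**
 * Hedged outline of the shared base class the per-version KafkaSink
 * implementations extend in this patch. Member names come from the diffs
 * above; bodies are reconstructed from the duplicated code being removed.
 */
public abstract class AbstractKafkaSink implements RetractStreamTableSink<Row>, IStreamSinkGener<AbstractKafkaSink> {

    // Assumed template value: the diffs only show ${topic} and ${table} being substituted.
    public static final String SINK_OPERATOR_NAME_TPL = "to_kafka_${topic}_${table}";

    protected String tableName;
    protected String topic;
    protected String[] fieldNames;
    protected TypeInformation[] fieldTypes;
    protected String[] partitionKeys;
    protected Optional<FlinkKafkaPartitioner<CRow>> partitioner;
    protected TableSchema schema;
    protected int parallelism;
    protected String sinkOperatorName;
    protected RichSinkFunction<CRow> kafkaProducer;

    /** bootstrap.servers plus the user-supplied kafka parameters, as each subclass previously built inline. */
    protected Properties getKafkaProperties(KafkaSinkTableInfo kafkaSinkTableInfo) {
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", kafkaSinkTableInfo.getBootstrapServers());
        for (String key : kafkaSinkTableInfo.getKafkaParamKeys()) {
            properties.setProperty(key, kafkaSinkTableInfo.getKafkaParam(key));
        }
        return properties;
    }

    /** Maps the declared field classes to Flink TypeInformation, replacing the loop removed from every subclass. */
    protected TypeInformation[] getTypeInformations(KafkaSinkTableInfo kafkaSinkTableInfo) {
        TypeInformation[] types = new TypeInformation[kafkaSinkTableInfo.getFieldClasses().length];
        for (int i = 0; i < kafkaSinkTableInfo.getFieldClasses().length; i++) {
            types[i] = TypeInformation.of(kafkaSinkTableInfo.getFieldClasses()[i]);
        }
        return types;
    }

    /** Rebuilds the TableSchema from the parallel name/type arrays, replacing the removed schemaBuilder loops. */
    protected TableSchema buildTableSchema(String[] fieldNames, TypeInformation[] fieldTypes) {
        TableSchema.Builder schemaBuilder = TableSchema.builder();
        for (int i = 0; i < fieldNames.length; i++) {
            schemaBuilder.field(fieldNames[i], fieldTypes[i]);
        }
        return schemaBuilder.build();
    }
}

With this split, each subclass only contributes the version-specific piece: wiring the matching producer factory (KafkaProducerFactory, KafkaProducer09Factory, KafkaProducer010Factory, KafkaProducer011Factory) into the shared kafkaProducer field, while the RetractStreamTableSink plumbing lives once in the base class.
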