diff --git a/backend/.sqlx/query-01e92a4ba3074f1dce6ec98bc6c3fad4878f48db8c17c6d58590bd5df2e3350a.json b/backend/.sqlx/query-01e92a4ba3074f1dce6ec98bc6c3fad4878f48db8c17c6d58590bd5df2e3350a.json new file mode 100644 index 0000000000000..a6f202a16f8d2 --- /dev/null +++ b/backend/.sqlx/query-01e92a4ba3074f1dce6ec98bc6c3fad4878f48db8c17c6d58590bd5df2e3350a.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE postgres_trigger \n SET \n server_id = $1, \n last_server_ping = now(),\n error = 'Connecting...'\n WHERE \n enabled IS TRUE \n AND workspace_id = $2 \n AND path = $3 \n AND (last_server_ping IS NULL \n OR last_server_ping < now() - INTERVAL '15 seconds'\n ) \n RETURNING true\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "01e92a4ba3074f1dce6ec98bc6c3fad4878f48db8c17c6d58590bd5df2e3350a" +} diff --git a/backend/.sqlx/query-124b27de35b49fbdb13a1f772044665a84325e34ae04bf2795fafb7bb6f2f0c6.json b/backend/.sqlx/query-124b27de35b49fbdb13a1f772044665a84325e34ae04bf2795fafb7bb6f2f0c6.json new file mode 100644 index 0000000000000..18cd8fbc0b3fc --- /dev/null +++ b/backend/.sqlx/query-124b27de35b49fbdb13a1f772044665a84325e34ae04bf2795fafb7bb6f2f0c6.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO postgres_trigger (\n publication_name,\n replication_slot_name,\n workspace_id, \n path, \n script_path, \n is_flow, \n email, \n enabled, \n postgres_resource_path, \n edited_by\n ) \n VALUES (\n $1, \n $2, \n $3, \n $4, \n $5, \n $6, \n $7, \n $8, \n $9, \n $10\n )", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Bool", + "Varchar", + "Bool", + "Varchar", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "124b27de35b49fbdb13a1f772044665a84325e34ae04bf2795fafb7bb6f2f0c6" +} diff --git a/backend/.sqlx/query-199a76c04e3f0891ad09af27b9534bbabdd8703bfdf4d43df2c65e50d4ca2c85.json b/backend/.sqlx/query-199a76c04e3f0891ad09af27b9534bbabdd8703bfdf4d43df2c65e50d4ca2c85.json new file mode 100644 index 0000000000000..7dfa56ad4049b --- /dev/null +++ b/backend/.sqlx/query-199a76c04e3f0891ad09af27b9534bbabdd8703bfdf4d43df2c65e50d4ca2c85.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n schemaname AS schema_name,\n tablename AS table_name,\n attnames AS columns,\n rowfilter AS where_clause\n FROM\n pg_publication_tables\n WHERE\n pubname = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "schema_name", + "type_info": "Name" + }, + { + "ordinal": 1, + "name": "table_name", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "columns", + "type_info": "NameArray" + }, + { + "ordinal": 3, + "name": "where_clause", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Name" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "199a76c04e3f0891ad09af27b9534bbabdd8703bfdf4d43df2c65e50d4ca2c85" +} diff --git a/backend/.sqlx/query-24178c21aadc1aed90f31e9362c6505a642c8f04b883c278b07e7ef5956ce121.json b/backend/.sqlx/query-24178c21aadc1aed90f31e9362c6505a642c8f04b883c278b07e7ef5956ce121.json new file mode 100644 index 0000000000000..d8cca020fec78 --- /dev/null +++ b/backend/.sqlx/query-24178c21aadc1aed90f31e9362c6505a642c8f04b883c278b07e7ef5956ce121.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT \n \n EXISTS(SELECT 
1 FROM websocket_trigger WHERE workspace_id = $1) AS \"websocket_used!\", \n \n EXISTS(SELECT 1 FROM http_trigger WHERE workspace_id = $1) AS \"http_routes_used!\",\n EXISTS(SELECT 1 FROM kafka_trigger WHERE workspace_id = $1) as \"kafka_used!\",\n EXISTS(SELECT 1 FROM nats_trigger WHERE workspace_id = $1) as \"nats_used!\",\n EXISTS(SELECT 1 FROM postgres_trigger WHERE workspace_id = $1) AS \"postgres_used!\"\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "websocket_used!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "http_routes_used!", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "kafka_used!", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "nats_used!", + "type_info": "Bool" + }, + { + "ordinal": 4, + "name": "postgres_used!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null, + null, + null, + null, + null + ] + }, + "hash": "24178c21aadc1aed90f31e9362c6505a642c8f04b883c278b07e7ef5956ce121" +} diff --git a/backend/.sqlx/query-2ef25599ea0c9ef946d6cc70ae048af970aed2638a3f767e152b654aebf68e48.json b/backend/.sqlx/query-2ef25599ea0c9ef946d6cc70ae048af970aed2638a3f767e152b654aebf68e48.json new file mode 100644 index 0000000000000..da7d13f71c7a0 --- /dev/null +++ b/backend/.sqlx/query-2ef25599ea0c9ef946d6cc70ae048af970aed2638a3f767e152b654aebf68e48.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SHOW WAL_LEVEL;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "wal_level", + "type_info": "Text" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "2ef25599ea0c9ef946d6cc70ae048af970aed2638a3f767e152b654aebf68e48" +} diff --git a/backend/.sqlx/query-4469ee6c206c46951980ea1bc73f126f339d2e3cf97f363be8921084b16dac45.json b/backend/.sqlx/query-4469ee6c206c46951980ea1bc73f126f339d2e3cf97f363be8921084b16dac45.json new file mode 100644 index 0000000000000..46a8d2e4f04fd --- /dev/null +++ b/backend/.sqlx/query-4469ee6c206c46951980ea1bc73f126f339d2e3cf97f363be8921084b16dac45.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT pubname AS publication_name FROM pg_publication;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "publication_name", + "type_info": "Name" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "4469ee6c206c46951980ea1bc73f126f339d2e3cf97f363be8921084b16dac45" +} diff --git a/backend/.sqlx/query-4ee0017771f46f0272817d18edb821940cb5064e3f155b9630b131c09c9dba13.json b/backend/.sqlx/query-4ee0017771f46f0272817d18edb821940cb5064e3f155b9630b131c09c9dba13.json new file mode 100644 index 0000000000000..52136dd33e185 --- /dev/null +++ b/backend/.sqlx/query-4ee0017771f46f0272817d18edb821940cb5064e3f155b9630b131c09c9dba13.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT \n slot_name,\n active\n FROM\n pg_replication_slots \n WHERE \n plugin = 'pgoutput' AND\n slot_type = 'logical';\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "slot_name", + "type_info": "Name" + }, + { + "ordinal": 1, + "name": "active", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true + ] + }, + "hash": "4ee0017771f46f0272817d18edb821940cb5064e3f155b9630b131c09c9dba13" +} diff --git a/backend/.sqlx/query-508d267b0d77fd12446654a502bf4968ecebec1614580e55de3d5895f0595e52.json b/backend/.sqlx/query-508d267b0d77fd12446654a502bf4968ecebec1614580e55de3d5895f0595e52.json new file mode 100644 index 
0000000000000..4a95ebaf74e84 --- /dev/null +++ b/backend/.sqlx/query-508d267b0d77fd12446654a502bf4968ecebec1614580e55de3d5895f0595e52.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM postgres_trigger \n WHERE \n workspace_id = $1 AND \n path = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "508d267b0d77fd12446654a502bf4968ecebec1614580e55de3d5895f0595e52" +} diff --git a/backend/.sqlx/query-661f472ff3860983322162420457f5033b9c9afc344d9c3e385ba20a3ad2197a.json b/backend/.sqlx/query-661f472ff3860983322162420457f5033b9c9afc344d9c3e385ba20a3ad2197a.json index 1fa370e682ca6..75b8108281532 100644 --- a/backend/.sqlx/query-661f472ff3860983322162420457f5033b9c9afc344d9c3e385ba20a3ad2197a.json +++ b/backend/.sqlx/query-661f472ff3860983322162420457f5033b9c9afc344d9c3e385ba20a3ad2197a.json @@ -5,7 +5,7 @@ "columns": [ { "ordinal": 0, - "name": "bool", + "name": "?column?", "type_info": "Bool" } ], diff --git a/backend/.sqlx/query-833a4ecec12dfe67f28016a135ffe682b023d1868a182b7cac16ce799433c257.json b/backend/.sqlx/query-833a4ecec12dfe67f28016a135ffe682b023d1868a182b7cac16ce799433c257.json new file mode 100644 index 0000000000000..44653c2265d15 --- /dev/null +++ b/backend/.sqlx/query-833a4ecec12dfe67f28016a135ffe682b023d1868a182b7cac16ce799433c257.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n postgres_trigger\n SET \n last_server_ping = now(),\n error = $1\n WHERE\n workspace_id = $2\n AND path = $3\n AND server_id = $4 \n AND enabled IS TRUE\n RETURNING 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "833a4ecec12dfe67f28016a135ffe682b023d1868a182b7cac16ce799433c257" +} diff --git a/backend/.sqlx/query-86ae16175ace0179e784aacfd381771f0137ecab6671d632febadede729e7783.json b/backend/.sqlx/query-86ae16175ace0179e784aacfd381771f0137ecab6671d632febadede729e7783.json new file mode 100644 index 0000000000000..cbf85f6b0d594 --- /dev/null +++ b/backend/.sqlx/query-86ae16175ace0179e784aacfd381771f0137ecab6671d632febadede729e7783.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n puballtables AS all_table,\n pubinsert AS insert,\n pubupdate AS update,\n pubdelete AS delete\n FROM\n pg_publication\n WHERE\n pubname = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "all_table", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "insert", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "update", + "type_info": "Bool" + }, + { + "ordinal": 3, + "name": "delete", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Name" + ] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "86ae16175ace0179e784aacfd381771f0137ecab6671d632febadede729e7783" +} diff --git a/backend/.sqlx/query-8e1afb488096330890b1675d2b3052d2064fcc8f373fecfebd40914768b2b1cf.json b/backend/.sqlx/query-8e1afb488096330890b1675d2b3052d2064fcc8f373fecfebd40914768b2b1cf.json new file mode 100644 index 0000000000000..a6780ad262735 --- /dev/null +++ b/backend/.sqlx/query-8e1afb488096330890b1675d2b3052d2064fcc8f373fecfebd40914768b2b1cf.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM postgres_trigger WHERE script_path = $1 AND is_flow = $2 AND workspace_id = $3", + "describe": { + "columns": [ + { + 
"ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Bool", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "8e1afb488096330890b1675d2b3052d2064fcc8f373fecfebd40914768b2b1cf" +} diff --git a/backend/.sqlx/query-946977f0d525abf6267bf02e7a887434abd3e213b8c3c488166ca58fe3321147.json b/backend/.sqlx/query-946977f0d525abf6267bf02e7a887434abd3e213b8c3c488166ca58fe3321147.json new file mode 100644 index 0000000000000..94c8a3b5b42f7 --- /dev/null +++ b/backend/.sqlx/query-946977f0d525abf6267bf02e7a887434abd3e213b8c3c488166ca58fe3321147.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE postgres_trigger \n SET \n script_path = $1, \n path = $2, \n is_flow = $3, \n edited_by = $4, \n email = $5, \n postgres_resource_path = $6, \n replication_slot_name = $7,\n publication_name = $8,\n edited_at = now(), \n error = NULL,\n server_id = NULL\n WHERE \n workspace_id = $9 AND \n path = $10\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Bool", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "946977f0d525abf6267bf02e7a887434abd3e213b8c3c488166ca58fe3321147" +} diff --git a/backend/.sqlx/query-95e420b60fba20b36b2c6675998587d8cad3b67d4dfa9de52777d4ea9490b6b7.json b/backend/.sqlx/query-95e420b60fba20b36b2c6675998587d8cad3b67d4dfa9de52777d4ea9490b6b7.json new file mode 100644 index 0000000000000..74f7f38890879 --- /dev/null +++ b/backend/.sqlx/query-95e420b60fba20b36b2c6675998587d8cad3b67d4dfa9de52777d4ea9490b6b7.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE \n postgres_trigger \n SET\n last_server_ping = NULL \n WHERE \n workspace_id = $1 \n AND path = $2 \n AND server_id IS NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "95e420b60fba20b36b2c6675998587d8cad3b67d4dfa9de52777d4ea9490b6b7" +} diff --git a/backend/.sqlx/query-9cb31818d4db8a0e294884ab3dec08bfc262f99c875bf16c25bfb5e987efe978.json b/backend/.sqlx/query-9cb31818d4db8a0e294884ab3dec08bfc262f99c875bf16c25bfb5e987efe978.json new file mode 100644 index 0000000000000..d46b7090833f2 --- /dev/null +++ b/backend/.sqlx/query-9cb31818d4db8a0e294884ab3dec08bfc262f99c875bf16c25bfb5e987efe978.json @@ -0,0 +1,104 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n workspace_id,\n path,\n script_path,\n replication_slot_name,\n publication_name,\n is_flow,\n edited_by,\n email,\n edited_at,\n server_id,\n last_server_ping,\n extra_perms,\n error,\n enabled,\n postgres_resource_path\n FROM\n postgres_trigger\n WHERE\n enabled IS TRUE\n AND (last_server_ping IS NULL OR\n last_server_ping < now() - interval '15 seconds'\n )\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "path", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "replication_slot_name", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "publication_name", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "is_flow", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "edited_by", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 8, + "name": "edited_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + 
"name": "server_id", + "type_info": "Varchar" + }, + { + "ordinal": 10, + "name": "last_server_ping", + "type_info": "Timestamptz" + }, + { + "ordinal": 11, + "name": "extra_perms", + "type_info": "Jsonb" + }, + { + "ordinal": 12, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 13, + "name": "enabled", + "type_info": "Bool" + }, + { + "ordinal": 14, + "name": "postgres_resource_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + false, + false + ] + }, + "hash": "9cb31818d4db8a0e294884ab3dec08bfc262f99c875bf16c25bfb5e987efe978" +} diff --git a/backend/.sqlx/query-a5a03b9235b25bca359235f2e546197f02ca1cf898d7f2686419749b1cb0679e.json b/backend/.sqlx/query-a5a03b9235b25bca359235f2e546197f02ca1cf898d7f2686419749b1cb0679e.json new file mode 100644 index 0000000000000..3bcb7d31deb24 --- /dev/null +++ b/backend/.sqlx/query-a5a03b9235b25bca359235f2e546197f02ca1cf898d7f2686419749b1cb0679e.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT value, is_secret \n FROM variable \n WHERE path = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "value", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "is_secret", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "a5a03b9235b25bca359235f2e546197f02ca1cf898d7f2686419749b1cb0679e" +} diff --git a/backend/.sqlx/query-a6c168c60bc8c42f70b18565e824efe29311aabfba6e09efa10bab6a551d658b.json b/backend/.sqlx/query-a6c168c60bc8c42f70b18565e824efe29311aabfba6e09efa10bab6a551d658b.json new file mode 100644 index 0000000000000..ce9457cc6a5de --- /dev/null +++ b/backend/.sqlx/query-a6c168c60bc8c42f70b18565e824efe29311aabfba6e09efa10bab6a551d658b.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE postgres_trigger SET enabled = FALSE, error = $1, server_id = NULL, last_server_ping = NULL WHERE workspace_id = $2 AND path = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "a6c168c60bc8c42f70b18565e824efe29311aabfba6e09efa10bab6a551d658b" +} diff --git a/backend/.sqlx/query-aae8699bbaa4d6111eabee715a6f4a3600c1ccfe6847bd526a751bc7baf825c5.json b/backend/.sqlx/query-aae8699bbaa4d6111eabee715a6f4a3600c1ccfe6847bd526a751bc7baf825c5.json new file mode 100644 index 0000000000000..9b94f6bab727b --- /dev/null +++ b/backend/.sqlx/query-aae8699bbaa4d6111eabee715a6f4a3600c1ccfe6847bd526a751bc7baf825c5.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT EXISTS(\n SELECT 1 \n FROM postgres_trigger \n WHERE \n path = $1 AND \n workspace_id = $2\n )", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "aae8699bbaa4d6111eabee715a6f4a3600c1ccfe6847bd526a751bc7baf825c5" +} diff --git a/backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json b/backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json index b378d68f5b09b..4843d959c190c 100644 --- a/backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json +++ 
b/backend/.sqlx/query-bfff3d8df18db198d6ebba8a049b00147fc8bcd42f3df37ef81b9ded80974bd0.json @@ -5,7 +5,7 @@ "columns": [ { "ordinal": 0, - "name": "bool", + "name": "?column?", "type_info": "Bool" } ], diff --git a/backend/.sqlx/query-fcc11a9353ea101109aec30f8bdd4b2ce906fffc3c51e77d083121dbd68dadd4.json b/backend/.sqlx/query-fcc11a9353ea101109aec30f8bdd4b2ce906fffc3c51e77d083121dbd68dadd4.json new file mode 100644 index 0000000000000..5af907c6b192b --- /dev/null +++ b/backend/.sqlx/query-fcc11a9353ea101109aec30f8bdd4b2ce906fffc3c51e77d083121dbd68dadd4.json @@ -0,0 +1,107 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n workspace_id,\n path,\n script_path,\n is_flow,\n edited_by,\n email,\n edited_at,\n server_id,\n last_server_ping,\n extra_perms,\n error,\n enabled,\n replication_slot_name,\n publication_name,\n postgres_resource_path\n FROM \n postgres_trigger\n WHERE \n workspace_id = $1 AND \n path = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "path", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "is_flow", + "type_info": "Bool" + }, + { + "ordinal": 4, + "name": "edited_by", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "edited_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 7, + "name": "server_id", + "type_info": "Varchar" + }, + { + "ordinal": 8, + "name": "last_server_ping", + "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "extra_perms", + "type_info": "Jsonb" + }, + { + "ordinal": 10, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 11, + "name": "enabled", + "type_info": "Bool" + }, + { + "ordinal": 12, + "name": "replication_slot_name", + "type_info": "Varchar" + }, + { + "ordinal": 13, + "name": "publication_name", + "type_info": "Varchar" + }, + { + "ordinal": 14, + "name": "postgres_resource_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + true, + false, + false, + false, + false + ] + }, + "hash": "fcc11a9353ea101109aec30f8bdd4b2ce906fffc3c51e77d083121dbd68dadd4" +} diff --git a/backend/.sqlx/query-fd1db7530acf3c84b2ab696504905a50d1ed4f69629c43de7d874769c340d909.json b/backend/.sqlx/query-fd1db7530acf3c84b2ab696504905a50d1ed4f69629c43de7d874769c340d909.json new file mode 100644 index 0000000000000..06cf05a25d352 --- /dev/null +++ b/backend/.sqlx/query-fd1db7530acf3c84b2ab696504905a50d1ed4f69629c43de7d874769c340d909.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE postgres_trigger \n SET \n enabled = $1, \n email = $2, \n edited_by = $3, \n edited_at = now(), \n server_id = NULL, \n error = NULL\n WHERE \n path = $4 AND \n workspace_id = $5 \n RETURNING 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Bool", + "Varchar", + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "fd1db7530acf3c84b2ab696504905a50d1ed4f69629c43de7d874769c340d909" +} diff --git a/backend/.vscode/settings.json b/backend/.vscode/settings.json index e2f6a75faa09b..ab8340f5c8d67 100644 --- a/backend/.vscode/settings.json +++ b/backend/.vscode/settings.json @@ -1,8 +1,6 @@ { 
"python.analysis.typeCheckingMode": "basic", - "rust-analyzer.linkedProjects": [ - "./windmill-common/Cargo.toml" - ], + "rust-analyzer.linkedProjects": ["./windmill-common/Cargo.toml"], "rust-analyzer.showUnlinkedFileNotification": false, "remote.portsAttributes": { "8000": { @@ -10,5 +8,8 @@ "onAutoForward": "openPreview" } }, - "remote.autoForwardPorts": true -} \ No newline at end of file + "remote.autoForwardPorts": true, + "conventionalCommits.scopes": [ + "restructring triggers, decoding trigger message on work" + ], +} diff --git a/backend/Cargo.lock b/backend/Cargo.lock index 76c4ea6ca0da2..aa79cf5e136c2 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -6123,6 +6123,15 @@ dependencies = [ "indexmap 2.7.1", ] +[[package]] +name = "pg_escape" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c7bc82ccbe2c7ef7ceed38dcac90d7ff46681e061e9d7310cbcd409113e303" +dependencies = [ + "phf", +] + [[package]] name = "phf" version = "0.11.3" @@ -6262,7 +6271,7 @@ dependencies = [ "native-tls", "tokio", "tokio-native-tls", - "tokio-postgres", + "tokio-postgres 0.7.12", ] [[package]] @@ -6283,6 +6292,33 @@ dependencies = [ "stringprep", ] +[[package]] +name = "postgres-protocol" +version = "0.6.7" +source = "git+https://github.com/imor/rust-postgres?rev=20265ef38e32a06f76b6f9b678e2077fc2211f6b#20265ef38e32a06f76b6f9b678e2077fc2211f6b" +dependencies = [ + "base64 0.22.1", + "byteorder", + "bytes", + "fallible-iterator", + "hmac", + "md-5 0.10.6", + "memchr", + "rand 0.8.5", + "sha2 0.10.8", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.7" +source = "git+https://github.com/imor/rust-postgres?rev=20265ef38e32a06f76b6f9b678e2077fc2211f6b#20265ef38e32a06f76b6f9b678e2077fc2211f6b" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol 0.6.7 (git+https://github.com/imor/rust-postgres?rev=20265ef38e32a06f76b6f9b678e2077fc2211f6b)", +] + [[package]] name = "postgres-types" version = "0.2.8" @@ -6294,7 +6330,7 @@ dependencies = [ "bytes", "chrono", "fallible-iterator", - "postgres-protocol", + "postgres-protocol 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde", "serde_json", "uuid 1.12.1", @@ -7212,7 +7248,7 @@ dependencies = [ "borsh", "bytes", "num-traits", - "postgres-types", + "postgres-types 0.2.8", "rand 0.8.5", "rkyv", "serde", @@ -9512,6 +9548,31 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-postgres" +version = "0.7.11" +source = "git+https://github.com/imor/rust-postgres?rev=20265ef38e32a06f76b6f9b678e2077fc2211f6b#20265ef38e32a06f76b6f9b678e2077fc2211f6b" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol 0.6.7 (git+https://github.com/imor/rust-postgres?rev=20265ef38e32a06f76b6f9b678e2077fc2211f6b)", + "postgres-types 0.2.7", + "rand 0.8.5", + "socket2", + "tokio", + "tokio-util", + "whoami", +] + [[package]] name = "tokio-postgres" version = "0.7.12" @@ -9529,8 +9590,8 @@ dependencies = [ "percent-encoding", "phf", "pin-project-lite", - "postgres-protocol", - "postgres-types", + "postgres-protocol 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "postgres-types 0.2.8", "rand 0.8.5", "socket2", "tokio", @@ -10736,6 +10797,7 @@ dependencies = [ "gethostname", "git-version", "lazy_static", + "memchr", "object_store", "once_cell", "prometheus", @@ -10778,6 +10840,7 @@ 
dependencies = [ "axum", "base32", "base64 0.22.1", + "byteorder", "bytes", "candle-core", "candle-nn", @@ -10807,6 +10870,7 @@ dependencies = [ "object_store", "openidconnect", "openssl", + "pg_escape", "pin-project", "prometheus", "quick_cache", @@ -10816,6 +10880,7 @@ dependencies = [ "reqwest 0.12.9", "rsa", "rust-embed", + "rust_decimal", "samael", "serde", "serde_json", @@ -10824,11 +10889,13 @@ dependencies = [ "sql-builder", "sqlx", "tempfile", + "thiserror 2.0.11", "time", "tinyvector", "tokenizers", "tokio", "tokio-native-tls", + "tokio-postgres 0.7.11", "tokio-tar", "tokio-tungstenite", "tokio-util", @@ -11187,7 +11254,6 @@ dependencies = [ "async-recursion", "axum", "backon", - "bigdecimal", "chrono", "chrono-tz 0.10.1", "cron", @@ -11278,7 +11344,7 @@ dependencies = [ "tar", "tiberius", "tokio", - "tokio-postgres", + "tokio-postgres 0.7.12", "tokio-util", "tracing", "urlencoding", diff --git a/backend/Cargo.toml b/backend/Cargo.toml index 5759f010be92f..2ef62c9f578a3 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -73,6 +73,7 @@ oracledb = ["windmill-worker/oracledb"] mssql = ["windmill-worker/mssql"] bigquery = ["windmill-worker/bigquery"] websocket = ["windmill-api/websocket"] +postgres_trigger = ["windmill-api/postgres_trigger"] python = ["windmill-worker/python"] smtp = ["windmill-api/smtp", "windmill-common/smtp"] csharp = ["windmill-worker/csharp"] @@ -114,6 +115,8 @@ serde.workspace = true deno_core = { workspace = true, optional = true } object_store = { workspace = true, optional = true } quote.workspace = true +memchr.workspace = true + [target.'cfg(not(target_env = "msvc"))'.dependencies] tikv-jemallocator = { optional = true, workspace = true } @@ -154,6 +157,7 @@ windmill-parser-graphql = { path = "./parsers/windmill-parser-graphql" } windmill-parser-php = { path = "./parsers/windmill-parser-php" } windmill-api-client = { path = "./windmill-api-client" } +memchr = "2.7.4" axum = { version = "^0.7", features = ["multipart"] } headers = "^0" hyper = { version = "^1", features = ["full"] } @@ -234,7 +238,7 @@ sqlx = { version = "0.8.0", features = [ "runtime-tokio-rustls", "bigdecimal" ] } -bigdecimal = "^0" +bigdecimal = {version = "^0"} dotenv = "^0" ulid = { version = "^1", features = ["uuid"] } futures = "^0" @@ -260,6 +264,7 @@ wasm-bindgen-test = "0.3.42" convert_case = "0.6.0" getrandom = "0.2" tokio-postgres = {version = "^0.7", features = ["array-impls", "with-serde_json-1", "with-chrono-0_4", "with-uuid-1", "with-bit-vec-0_6"]} +rust-postgres = { package = "tokio-postgres", git = "https://github.com/imor/rust-postgres", rev = "20265ef38e32a06f76b6f9b678e2077fc2211f6b"} bit-vec = "=0.6.3" mappable-rc = "^0" mysql_async = { version = "*", default-features = false, features = ["minimal", "default", "native-tls-tls", "rust_decimal"]} @@ -269,7 +274,7 @@ native-tls = "^0" # samael = { git="https://github.com/njaremko/samael", rev="464d015e3ae393e4b5dd00b4d6baa1b617de0dd6", features = ["xmlsec"] } samael = { version="0.0.14", features = ["xmlsec"] } gcp_auth = "0.9.0" -rust_decimal = { version = "^1", features = ["db-postgres"]} +rust_decimal = { version = "^1", features = ["db-postgres", "serde-float"]} jsonwebtoken = "8.3.0" pem = "3.0.1" nix = { version = "0.27.1", features = ["process", "signal"] } @@ -287,6 +292,7 @@ openssl = "=0.10" mail-parser = "^0" matchit = "=0.7.3" rdkafka = { version = "0.36.2", features = ["cmake-build", "ssl-vendored"] } +pg_escape = "0.1.1" async-nats = "0.38.0" nkeys = "0.4.4" @@ -311,6 +317,7 @@ 
opentelemetry-semantic-conventions = { version = "*", features = ["semconv_exper bollard = "0.18.1" tonic = { version = "^0", features = ["tls-native-roots"] } +byteorder = "1.5.0" tikv-jemallocator = { version = "0.5" } tikv-jemalloc-sys = { version = "^0.5" } diff --git a/backend/migrations/20241123152203_postgres_triggers.down.sql b/backend/migrations/20241123152203_postgres_triggers.down.sql new file mode 100644 index 0000000000000..66431d8887304 --- /dev/null +++ b/backend/migrations/20241123152203_postgres_triggers.down.sql @@ -0,0 +1,2 @@ +-- Add down migration script here +DROP TABLE IF EXISTS postgres_trigger; \ No newline at end of file diff --git a/backend/migrations/20241123152203_postgres_triggers.up.sql b/backend/migrations/20241123152203_postgres_triggers.up.sql new file mode 100644 index 0000000000000..d6f927560d6f8 --- /dev/null +++ b/backend/migrations/20241123152203_postgres_triggers.up.sql @@ -0,0 +1,69 @@ +-- Add up migration script here +CREATE TABLE postgres_trigger( + path VARCHAR(255) NOT NULL, + script_path VARCHAR(255) NOT NULL, + is_flow BOOLEAN NOT NULL, + workspace_id VARCHAR(50) NOT NULL, + edited_by VARCHAR(50) NOT NULL, + email VARCHAR(255) NOT NULL, + edited_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + extra_perms JSONB NULL, + postgres_resource_path VARCHAR(255) NOT NULL, + error TEXT NULL, + server_id VARCHAR(50) NULL, + last_server_ping TIMESTAMPTZ NULL, + replication_slot_name VARCHAR(255) NOT NULL, + publication_name VARCHAR(255) NOT NULL, + enabled BOOLEAN NOT NULL, + CONSTRAINT PK_postgres_trigger PRIMARY KEY (path,workspace_id), + CONSTRAINT fk_postgres_trigger_workspace FOREIGN KEY (workspace_id) + REFERENCES workspace(id) ON DELETE CASCADE +); + +GRANT ALL ON postgres_trigger TO windmill_user; +GRANT ALL ON postgres_trigger TO windmill_admin; + +ALTER TABLE postgres_trigger ENABLE ROW LEVEL SECURITY; + +CREATE POLICY admin_policy ON postgres_trigger FOR ALL TO windmill_admin USING (true); + +CREATE POLICY see_folder_extra_perms_user_select ON postgres_trigger FOR SELECT TO windmill_user +USING (SPLIT_PART(postgres_trigger.path, '/', 1) = 'f' AND SPLIT_PART(postgres_trigger.path, '/', 2) = any(regexp_split_to_array(current_setting('session.folders_read'), ',')::text[])); +CREATE POLICY see_folder_extra_perms_user_insert ON postgres_trigger FOR INSERT TO windmill_user +WITH CHECK (SPLIT_PART(postgres_trigger.path, '/', 1) = 'f' AND SPLIT_PART(postgres_trigger.path, '/', 2) = any(regexp_split_to_array(current_setting('session.folders_write'), ',')::text[])); +CREATE POLICY see_folder_extra_perms_user_update ON postgres_trigger FOR UPDATE TO windmill_user +USING (SPLIT_PART(postgres_trigger.path, '/', 1) = 'f' AND SPLIT_PART(postgres_trigger.path, '/', 2) = any(regexp_split_to_array(current_setting('session.folders_write'), ',')::text[])); +CREATE POLICY see_folder_extra_perms_user_delete ON postgres_trigger FOR DELETE TO windmill_user +USING (SPLIT_PART(postgres_trigger.path, '/', 1) = 'f' AND SPLIT_PART(postgres_trigger.path, '/', 2) = any(regexp_split_to_array(current_setting('session.folders_write'), ',')::text[])); + +CREATE POLICY see_own ON postgres_trigger FOR ALL TO windmill_user +USING (SPLIT_PART(postgres_trigger.path, '/', 1) = 'u' AND SPLIT_PART(postgres_trigger.path, '/', 2) = current_setting('session.user')); +CREATE POLICY see_member ON postgres_trigger FOR ALL TO windmill_user +USING (SPLIT_PART(postgres_trigger.path, '/', 1) = 'g' AND SPLIT_PART(postgres_trigger.path, '/', 2) = 
any(regexp_split_to_array(current_setting('session.groups'), ',')::text[])); + +CREATE POLICY see_extra_perms_user_select ON postgres_trigger FOR SELECT TO windmill_user +USING (extra_perms ? CONCAT('u/', current_setting('session.user'))); +CREATE POLICY see_extra_perms_user_insert ON postgres_trigger FOR INSERT TO windmill_user +WITH CHECK ((extra_perms ->> CONCAT('u/', current_setting('session.user')))::boolean); +CREATE POLICY see_extra_perms_user_update ON postgres_trigger FOR UPDATE TO windmill_user +USING ((extra_perms ->> CONCAT('u/', current_setting('session.user')))::boolean); +CREATE POLICY see_extra_perms_user_delete ON postgres_trigger FOR DELETE TO windmill_user +USING ((extra_perms ->> CONCAT('u/', current_setting('session.user')))::boolean); + +CREATE POLICY see_extra_perms_groups_select ON postgres_trigger FOR SELECT TO windmill_user +USING (extra_perms ?| regexp_split_to_array(current_setting('session.pgroups'), ',')::text[]); +CREATE POLICY see_extra_perms_groups_insert ON postgres_trigger FOR INSERT TO windmill_user +WITH CHECK (exists( + SELECT key, value FROM jsonb_each_text(extra_perms) + WHERE SPLIT_PART(key, '/', 1) = 'g' AND key = ANY(regexp_split_to_array(current_setting('session.pgroups'), ',')::text[]) + AND value::boolean)); +CREATE POLICY see_extra_perms_groups_update ON postgres_trigger FOR UPDATE TO windmill_user +USING (exists( + SELECT key, value FROM jsonb_each_text(extra_perms) + WHERE SPLIT_PART(key, '/', 1) = 'g' AND key = ANY(regexp_split_to_array(current_setting('session.pgroups'), ',')::text[]) + AND value::boolean)); +CREATE POLICY see_extra_perms_groups_delete ON postgres_trigger FOR DELETE TO windmill_user +USING (exists( + SELECT key, value FROM jsonb_each_text(extra_perms) + WHERE SPLIT_PART(key, '/', 1) = 'g' AND key = ANY(regexp_split_to_array(current_setting('session.pgroups'), ',')::text[]) + AND value::boolean)); \ No newline at end of file diff --git a/backend/parsers/windmill-parser-csharp/src/wasm_libc.rs b/backend/parsers/windmill-parser-csharp/src/wasm_libc.rs index bb703fb1310ec..b67c3a07683c2 100644 --- a/backend/parsers/windmill-parser-csharp/src/wasm_libc.rs +++ b/backend/parsers/windmill-parser-csharp/src/wasm_libc.rs @@ -1,11 +1,11 @@ +use std::collections::BTreeMap; +use std::sync::{Mutex, OnceLock}; use std::{ alloc::{self, Layout}, ffi::{c_char, c_int, c_void}, mem::align_of, ptr, }; -use std::collections::BTreeMap; -use std::sync::{Mutex, OnceLock}; use wasm_bindgen::prelude::*; /* -------------------------------- stdlib.h -------------------------------- */ @@ -71,7 +71,6 @@ pub unsafe extern "C" fn free(buf: *mut c_void) { alloc::dealloc(buf, layout); } - // In all these allocations, we store the layout before the data for later retrieval. // This is because we need to know the layout when deallocating the memory. 
// Here are some helper methods for that: diff --git a/backend/tests/worker.rs b/backend/tests/worker.rs index 7bb71416c7374..001b15ac45399 100644 --- a/backend/tests/worker.rs +++ b/backend/tests/worker.rs @@ -1,7 +1,7 @@ use serde::de::DeserializeOwned; use std::future::Future; use std::{str::FromStr, sync::Arc}; -use windmill_api_client::types::{NewScript, NewScriptLanguage}; +use windmill_api_client::types::{NewScript, ScriptLang as NewScriptLanguage}; #[cfg(feature = "enterprise")] use chrono::Timelike; diff --git a/backend/windmill-api/Cargo.toml b/backend/windmill-api/Cargo.toml index 3bf133c0203d7..2033132b473b8 100644 --- a/backend/windmill-api/Cargo.toml +++ b/backend/windmill-api/Cargo.toml @@ -28,6 +28,7 @@ zip = ["dep:async_zip"] oauth2 = ["dep:async-oauth2"] http_trigger = ["dep:matchit"] static_frontend = ["dep:rust-embed"] +postgres_trigger = ["dep:rust-postgres", "dep:pg_escape", "dep:byteorder", "dep:thiserror", "dep:rust_decimal"] [dependencies] windmill-queue.workspace = true @@ -107,8 +108,12 @@ rdkafka = { workspace = true, optional = true } async-nats = { workspace = true, optional = true } nkeys = { workspace = true, optional = true } const_format.workspace = true - pin-project.workspace = true http.workspace = true async-stream.workspace = true ulid.workspace = true +rust-postgres = { workspace = true, optional = true } +pg_escape = { workspace = true, optional = true } +byteorder = { workspace = true, optional = true } +thiserror = { workspace = true, optional = true } +rust_decimal = { workspace = true, optional = true } \ No newline at end of file diff --git a/backend/windmill-api/openapi.yaml b/backend/windmill-api/openapi.yaml index dd7fc1877c972..82bd7a9c0e53d 100644 --- a/backend/windmill-api/openapi.yaml +++ b/backend/windmill-api/openapi.yaml @@ -2429,12 +2429,14 @@ paths: type: boolean nats_used: type: boolean + postgres_used: + type: boolean required: - http_routes_used - websocket_used - kafka_used - nats_used - + - postgres_used /w/{workspace}/users/list: get: summary: list users @@ -8240,6 +8242,396 @@ paths: schema: type: string + /w/{workspace}/postgres_triggers/is_valid_postgres_configuration/{path}: + get: + summary: check if postgres configuration is set to logical + operationId: isValidPostgresConfiguration + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: boolean that indicates if postgres is set to logical level or not + content: + application/json: + schema: + type: boolean + + /w/{workspace}/postgres_triggers/create_template_script: + post: + summary: create template script + operationId: createTemplateScript + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + requestBody: + description: template script + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/TemplateScript" + responses: + "200": + description: custom id to retrieve template script + content: + text/plain: + schema: + type: string + + /w/{workspace}/postgres_triggers/get_template_script/{id}: + get: + summary: get template script + operationId: getTemplateScript + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Id" + responses: + "200": + description: template script + content: + text/plain: + schema: + type: string + + /w/{workspace}/postgres_triggers/slot/list/{path}: + get: + summary: list postgres replication 
slot + operationId: listPostgresReplicationSlot + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: list postgres slot + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/SlotList" + + /w/{workspace}/postgres_triggers/slot/create/{path}: + post: + summary: create replication slot for postgres + operationId: createPostgresReplicationSlot + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + requestBody: + description: new slot for postgres + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/Slot" + responses: + "201": + description: slot created + content: + text/plain: + schema: + type: string + + /w/{workspace}/postgres_triggers/slot/delete/{path}: + delete: + summary: delete postgres replication slot + operationId: deletePostgresReplicationSlot + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + requestBody: + description: replication slot of postgres + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/Slot" + responses: + "200": + description: postgres replication slot deleted + content: + text/plain: + schema: + type: string + + /w/{workspace}/postgres_triggers/publication/list/{path}: + get: + summary: list postgres publication + operationId: listPostgresPublication + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: database publication list + content: + application/json: + schema: + type: array + items: + type: string + + /w/{workspace}/postgres_triggers/publication/get/{publication}/{path}: + get: + summary: get postgres publication + operationId: getPostgresPublication + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + - $ref: "#/components/parameters/PublicationName" + responses: + "200": + description: postgres publication get + content: + application/json: + schema: + $ref: "#/components/schemas/PublicationData" + + /w/{workspace}/postgres_triggers/publication/create/{publication}/{path}: + post: + summary: create publication for postgres + operationId: createPostgresPublication + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + - $ref: "#/components/parameters/PublicationName" + requestBody: + description: new publication for postgres + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PublicationData" + responses: + "201": + description: publication created + content: + text/plain: + schema: + type: string + + /w/{workspace}/postgres_triggers/publication/update/{publication}/{path}: + post: + summary: update publication for postgres + operationId: updatePostgresPublication + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + - $ref: "#/components/parameters/PublicationName" + requestBody: + description: update publication for postgres + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PublicationData" + responses: + "201": + description: publication updated + 
content: + text/plain: + schema: + type: string + + + /w/{workspace}/postgres_triggers/publication/delete/{publication}/{path}: + delete: + summary: delete postgres publication + operationId: deletePostgresPublication + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + - $ref: "#/components/parameters/PublicationName" + responses: + "200": + description: postgres publication deleted + content: + text/plain: + schema: + type: string + + /w/{workspace}/postgres_triggers/create: + post: + summary: create postgres trigger + operationId: createPostgresTrigger + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + requestBody: + description: new postgres trigger + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/NewPostgresTrigger" + responses: + "201": + description: postgres trigger created + content: + text/plain: + schema: + type: string + + /w/{workspace}/postgres_triggers/update/{path}: + post: + summary: update postgres trigger + operationId: updatePostgresTrigger + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + requestBody: + description: updated trigger + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/EditPostgresTrigger" + responses: + "200": + description: postgres trigger updated + content: + text/plain: + schema: + type: string + + /w/{workspace}/postgres_triggers/delete/{path}: + delete: + summary: delete postgres trigger + operationId: deletePostgresTrigger + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: postgres trigger deleted + content: + text/plain: + schema: + type: string + + /w/{workspace}/postgres_triggers/get/{path}: + get: + summary: get postgres trigger + operationId: getPostgresTrigger + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: get postgres trigger + content: + application/json: + schema: + $ref: "#/components/schemas/PostgresTrigger" + + /w/{workspace}/postgres_triggers/list: + get: + summary: list postgres triggers + operationId: listPostgresTriggers + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + required: true + - $ref: "#/components/parameters/Page" + - $ref: "#/components/parameters/PerPage" + - name: path + description: filter by path + in: query + schema: + type: string + - name: is_flow + in: query + schema: + type: boolean + - name: path_start + in: query + schema: + type: string + responses: + "200": + description: postgres trigger list + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/PostgresTrigger" + + /w/{workspace}/postgres_triggers/exists/{path}: + get: + summary: does postgres trigger exists + operationId: existsPostgresTrigger + tags: + - postgres_trigger + parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + responses: + "200": + description: postgres trigger exists + content: + application/json: + schema: + type: boolean + + /w/{workspace}/postgres_triggers/setenabled/{path}: + post: + summary: set enabled postgres trigger + operationId: setPostgresTriggerEnabled + tags: + - postgres_trigger + 
parameters: + - $ref: "#/components/parameters/WorkspaceId" + - $ref: "#/components/parameters/Path" + requestBody: + description: updated postgres trigger enable + required: true + content: + application/json: + schema: + type: object + properties: + enabled: + type: boolean + required: + - enabled + responses: + "200": + description: postgres trigger enabled set + content: + text/plain: + schema: + type: string /groups/list: get: @@ -9094,8 +9486,7 @@ paths: required: true schema: type: string - enum: - [ + enum: [ script, group_, resource, @@ -9109,6 +9500,7 @@ paths: websocket_trigger, kafka_trigger, nats_trigger, + postgres_trigger, ] responses: "200": @@ -9149,6 +9541,7 @@ paths: websocket_trigger, kafka_trigger, nats_trigger, + postgres_trigger, ] requestBody: description: acl to add @@ -9200,6 +9593,7 @@ paths: websocket_trigger, kafka_trigger, nats_trigger, + postgres_trigger, ] requestBody: description: acl to add @@ -10623,6 +11017,12 @@ components: name: token parameters: + Id: + name: id + in: path + required: true + schema: + type: string Key: name: key in: path @@ -10635,6 +11035,12 @@ components: required: true schema: type: string + PublicationName: + name: publication + in: path + required: true + schema: + type: string VersionId: name: version in: path @@ -11057,28 +11463,7 @@ components: lock_error_logs: type: string language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - oracledb, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" kind: type: string enum: [script, failure, trigger, command, approval, preprocessor] @@ -11163,28 +11548,7 @@ components: lock: type: string language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - oracledb, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" kind: type: string enum: [script, failure, trigger, command, approval, preprocessor] @@ -11391,28 +11755,7 @@ components: is_flow_step: type: boolean language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - oracledb, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" email: type: string visible_to_owner: @@ -11515,28 +11858,7 @@ components: is_flow_step: type: boolean language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - oracledb, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" is_skipped: type: boolean email: @@ -12062,6 +12384,30 @@ components: - no_main_func - has_preprocessor + ScriptLang: + type: string + enum: + [ + python3, + deno, + go, + bash, + powershell, + postgresql, + mysql, + bigquery, + snowflake, + mssql, + oracledb, + graphql, + nativets, + bun, + php, + rust, + ansible, + csharp + ] + Preview: type: object properties: @@ -12072,28 +12418,7 @@ components: args: $ref: "#/components/schemas/ScriptArgs" language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - oracledb, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" tag: type: string kind: @@ 
-12458,16 +12783,36 @@ components: - is_flow - args - HttpTrigger: + TriggerExtraProperty: type: object properties: - path: + email: + type: string + extra_perms: + type: object + additionalProperties: + type: boolean + workspace_id: type: string edited_by: type: string edited_at: type: string format: date-time + required: + - email + - extra_perms + - workspace_id + - edited_by + - edited_at + + HttpTrigger: + allOf: + - $ref: "#/components/schemas/TriggerExtraProperty" + type: object + properties: + path: + type: string script_path: type: string route_path: @@ -12485,14 +12830,6 @@ components: - s3 is_flow: type: boolean - extra_perms: - type: object - additionalProperties: - type: boolean - email: - type: string - workspace_id: - type: string http_method: type: string enum: @@ -12625,35 +12962,26 @@ components: type: number websocket_count: type: number + postgres_count: + type: number kafka_count: type: number nats_count: type: number WebsocketTrigger: + allOf: + - $ref: "#/components/schemas/TriggerExtraProperty" type: object properties: path: type: string - edited_by: - type: string - edited_at: - type: string - format: date-time script_path: type: string url: type: string is_flow: type: boolean - extra_perms: - type: object - additionalProperties: - type: boolean - email: - type: string - workspace_id: - type: string server_id: type: string last_server_ping: @@ -12793,6 +13121,168 @@ components: - is_flow required: - runnable_result + + Slot: + type: object + properties: + name: + type: string + + SlotList: + type: object + properties: + slot_name: + type: string + active: + type: boolean + + PublicationData: + type: object + properties: + table_to_track: + type: array + items: + $ref: "#/components/schemas/Relations" + transaction_to_track: + type: array + items: + type: string + required: + - transaction_to_track + + TableToTrack: + type: array + items: + type: object + properties: + table_name: + type: string + columns_name: + type: array + items: + type: string + where_clause: + type: string + required: + - table_name + + Relations: + type: object + properties: + schema_name: + type: string + table_to_track: + $ref: "#/components/schemas/TableToTrack" + required: + - schema_name + - table_to_track + + Language: + type: string + enum: + - Typescript + + TemplateScript: + type: object + properties: + postgres_resource_path: + type: string + relations: + type: array + items: + $ref: "#/components/schemas/Relations" + language: + $ref: "#/components/schemas/Language" + required: + - postgres_resource_path + - relations + - language + + PostgresTrigger: + allOf: + - $ref: "#/components/schemas/TriggerExtraProperty" + type: object + properties: + path: + type: string + script_path: + type: string + is_flow: + type: boolean + enabled: + type: boolean + postgres_resource_path: + type: string + publication_name: + type: string + server_id: + type: string + replication_slot_name: + type: string + error: + type: string + required: + - path + - script_path + - is_flow + - enabled + - postgres_resource_path + - replication_slot_name + - publication_name + + NewPostgresTrigger: + type: object + properties: + replication_slot_name: + type: string + publication_name: + type: string + path: + type: string + script_path: + type: string + is_flow: + type: boolean + enabled: + type: boolean + postgres_resource_path: + type: string + publication: + $ref: "#/components/schemas/PublicationData" + required: + - path + - script_path + - is_flow + - enabled + - postgres_resource_path + + 
EditPostgresTrigger: + type: object + properties: + replication_slot_name: + type: string + publication_name: + type: string + path: + type: string + script_path: + type: string + is_flow: + type: boolean + enabled: + type: boolean + postgres_resource_path: + type: string + publication: + $ref: "#/components/schemas/PublicationData" + required: + - path + - script_path + - is_flow + - enabled + - postgres_resource_path + - publication_name + - replication_slot_name KafkaTrigger: type: object @@ -13817,28 +14307,7 @@ components: path: type: string language: - type: string - enum: - [ - python3, - deno, - go, - bash, - powershell, - postgresql, - mysql, - bigquery, - snowflake, - mssql, - oracledb, - graphql, - nativets, - bun, - php, - rust, - ansible, - csharp, - ] + $ref: "#/components/schemas/ScriptLang" required: - raw_code - path diff --git a/backend/windmill-api/src/ai.rs b/backend/windmill-api/src/ai.rs index 234a1fe73a31c..3b97ba18ce376 100644 --- a/backend/windmill-api/src/ai.rs +++ b/backend/windmill-api/src/ai.rs @@ -1,5 +1,8 @@ -use crate::db::{ApiAuthed, DB}; -use windmill_common::variables::decrypt; +use crate::{ + db::{ApiAuthed, DB}, + variables::get_variable_or_self, +}; + use anthropic::AnthropicCache; use axum::{ body::Bytes, @@ -17,7 +20,6 @@ use serde::{Deserialize, Deserializer}; use windmill_audit::audit_ee::audit_log; use windmill_audit::ActionKind; use windmill_common::error::{to_anyhow, Result}; -use windmill_common::variables::build_crypt; use windmill_common::error::Error; @@ -344,38 +346,11 @@ lazy_static! { pub static ref AI_KEY_CACHE: Cache = Cache::new(500); } -struct Variable { - value: String, - is_secret: bool, -} - #[derive(Deserialize, Debug)] struct ProxyQueryParams { no_cache: Option, } -async fn get_variable_or_self(path: String, db: &DB, w_id: &str) -> Result { - if !path.starts_with("$var:") { - return Ok(path); - } - let path = path.strip_prefix("$var:").unwrap().to_string(); - let mut variable = sqlx::query_as!( - Variable, - "SELECT value, is_secret - FROM variable - WHERE path = $1 AND workspace_id = $2", - &path, - &w_id - ) - .fetch_one(db) - .await?; - if variable.is_secret { - let mc = build_crypt(db, w_id).await?; - variable.value = decrypt(&mc, variable.value)?; - } - Ok(variable.value) -} - #[derive(Deserialize, Debug)] pub struct AiResource { pub path: String, diff --git a/backend/windmill-api/src/lib.rs b/backend/windmill-api/src/lib.rs index 4734b3a3ab0cc..006e52f9507b4 100644 --- a/backend/windmill-api/src/lib.rs +++ b/backend/windmill-api/src/lib.rs @@ -59,6 +59,8 @@ mod auth; mod capture; mod concurrency_groups; mod configs; +#[cfg(feature = "postgres_trigger")] +mod postgres_triggers; mod db; mod drafts; pub mod ee; @@ -308,6 +310,11 @@ pub async fn run_server( let nats_killpill_rx = rx.resubscribe(); nats_triggers_ee::start_nats_consumers(db.clone(), nats_killpill_rx).await; } + #[cfg(feature = "postgres_trigger")] + { + let db_killpill_rx = rx.resubscribe(); + postgres_triggers::start_database(db.clone(), db_killpill_rx).await; + } } // build our application with a route @@ -377,7 +384,16 @@ pub async fn run_server( Router::new() }) .nest("/kafka_triggers", kafka_triggers_service) - .nest("/nats_triggers", nats_triggers_service), + .nest("/nats_triggers", nats_triggers_service) + .nest("/postgres_triggers", { + #[cfg(feature = "postgres_trigger")] + { + postgres_triggers::workspaced_service() + } + + #[cfg(not(feature = "postgres_trigger"))] + Router::new() + }), ) .nest("/workspaces", workspaces::global_service()) .nest( diff 
--git a/backend/windmill-api/src/postgres_triggers/bool.rs b/backend/windmill-api/src/postgres_triggers/bool.rs new file mode 100644 index 0000000000000..9c415780a5bed --- /dev/null +++ b/backend/windmill-api/src/postgres_triggers/bool.rs @@ -0,0 +1,24 @@ +use thiserror::Error; + +/** +* This implementation is inspired by Postgres replication functionality +* from https://github.com/supabase/pg_replicate +* +* Original implementation: +* - https://github.dev/supabase/pg_replicate/blob/main/pg_replicate/src/conversions/bool.rs +* +*/ + +#[derive(Debug, Error)] +pub enum ParseBoolError { + #[error("invalid input value: {0}")] + InvalidInput(String), +} + +pub fn parse_bool(s: &str) -> Result { + match s { + "t" => Ok(true), + "f" => Ok(false), + _ => Err(ParseBoolError::InvalidInput(s.to_string())), + } +} diff --git a/backend/windmill-api/src/postgres_triggers/converter.rs b/backend/windmill-api/src/postgres_triggers/converter.rs new file mode 100644 index 0000000000000..f57268c4b81eb --- /dev/null +++ b/backend/windmill-api/src/postgres_triggers/converter.rs @@ -0,0 +1,255 @@ +use core::str; +use std::{ + num::{ParseFloatError, ParseIntError}, + str::FromStr, +}; + +use super::{ + bool::{parse_bool, ParseBoolError}, + hex::{from_bytea_hex, ByteaHexParseError}, +}; +use chrono::{DateTime, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, Utc}; +use rust_decimal::Decimal; +use rust_postgres::types::Type; +use serde_json::{to_value, Number, Value}; +use thiserror::Error; +use uuid::Uuid; + +/** +* This implementation is inspired by Postgres replication functionality +* from https://github.com/supabase/pg_replicate +* +* Original implementation: +* - https://github.com/supabase/pg_replicate/blob/main/pg_replicate/src/conversions/text.rs +* +*/ + +#[derive(Debug, Error)] +pub enum ConverterError { + #[error("invalid bool value")] + InvalidBool(#[from] ParseBoolError), + + #[error("invalid int value")] + InvalidInt(#[from] ParseIntError), + + #[error("invalid float value")] + InvalidFloat(#[from] ParseFloatError), + + #[error("invalid numeric: {0}")] + InvalidNumeric(#[from] rust_decimal::Error), + + #[error("invalid bytea: {0}")] + InvalidBytea(#[from] ByteaHexParseError), + + #[error("invalid uuid: {0}")] + InvalidUuid(#[from] uuid::Error), + + #[error("invalid json: {0}")] + InvalidJson(#[from] serde_json::Error), + + #[error("invalid timestamp: {0} ")] + InvalidTimestamp(#[from] chrono::ParseError), + + #[error("invalid array: {0}")] + InvalidArray(#[from] ArrayParseError), + + #[error("{0}")] + Custom(String), +} + +fn convert_into(number: T) -> Number +where + T: Sized, + serde_json::Number: From, +{ + serde_json::Number::from(number) +} + +pub struct Converter; + +#[derive(Debug, Error)] +pub enum ArrayParseError { + #[error("input too short")] + InputTooShort, + + #[error("missing braces")] + MissingBraces, +} + +fn f64_to_json_number(raw_val: f64) -> Result { + let temp = serde_json::Number::from_f64(raw_val.into()) + .ok_or(ConverterError::Custom("invalid json-float".to_string()))?; + Ok(Value::Number(temp)) +} + +impl Converter { + pub fn try_from_str(typ: Option, str: &str) -> Result { + let value = match typ.unwrap_or(Type::TEXT) { + Type::BOOL => Value::Bool(parse_bool(str)?), + Type::BOOL_ARRAY => { + Converter::parse_array(str, |str| Ok(Value::Bool(parse_bool(str)?)))? 
+ } + Type::CHAR | Type::BPCHAR | Type::VARCHAR | Type::NAME | Type::TEXT => { + Value::String(str.to_string()) + } + Type::CHAR_ARRAY + | Type::BPCHAR_ARRAY + | Type::VARCHAR_ARRAY + | Type::NAME_ARRAY + | Type::TEXT_ARRAY => { + Converter::parse_array(str, |str| Ok(Value::String(str.to_string())))? + } + Type::INT2 => Value::Number(convert_into(str.parse::()?)), + Type::INT2_ARRAY => Converter::parse_array(str, |str| { + Ok(Value::Number(convert_into(str.parse::()?))) + })?, + Type::INT4 => Value::Number(convert_into(str.parse::()?)), + Type::INT4_ARRAY => Converter::parse_array(str, |str| { + Ok(Value::Number(convert_into(str.parse::()?))) + })?, + Type::INT8 => Value::Number(convert_into(str.parse::()?)), + Type::INT8_ARRAY => Converter::parse_array(str, |str| { + Ok(Value::Number(convert_into(str.parse::()?))) + })?, + Type::FLOAT4 => f64_to_json_number(str.parse::()?)?, + Type::FLOAT4_ARRAY => { + Converter::parse_array(str, |str| f64_to_json_number(str.parse::()?))? + } + Type::FLOAT8 => f64_to_json_number(str.parse::()?)?, + Type::FLOAT8_ARRAY => { + Converter::parse_array(str, |str| f64_to_json_number(str.parse::()?))? + } + Type::NUMERIC => serde_json::json!(Decimal::from_str(str)?), + Type::NUMERIC_ARRAY => { + Converter::parse_array(str, |str| Ok(serde_json::json!(Decimal::from_str(str)?)))? + } + Type::BYTEA => to_value(from_bytea_hex(str)?).unwrap(), + Type::BYTEA_ARRAY => { + Converter::parse_array(str, |str| Ok(to_value(from_bytea_hex(str)?).unwrap()))? + } + Type::DATE => { + let date = NaiveDate::parse_from_str(str, "%Y-%m-%d")?; + Value::String(date.to_string()) + } + Type::DATE_ARRAY => Converter::parse_array(str, |str| { + let date = NaiveDate::parse_from_str(str, "%Y-%m-%d")?; + Ok(Value::String(date.to_string())) + })?, + Type::TIME => { + let time = NaiveTime::parse_from_str(str, "%H:%M:%S%.f")?; + Value::String(time.to_string()) + } + Type::TIME_ARRAY => Converter::parse_array(str, |str| { + let time = NaiveTime::parse_from_str(str, "%H:%M:%S%.f")?; + Ok(Value::String(time.to_string())) + })?, + Type::TIMESTAMP => { + let timestamp = NaiveDateTime::parse_from_str(str, "%Y-%m-%d %H:%M:%S%.f")?; + Value::String(timestamp.to_string()) + } + Type::TIMESTAMP_ARRAY => Converter::parse_array(str, |str| { + let timestamp = NaiveDateTime::parse_from_str(str, "%Y-%m-%d %H:%M:%S%.f")?; + Ok(Value::String(timestamp.to_string())) + })?, + Type::TIMESTAMPTZ => { + let val = + match DateTime::::parse_from_str(str, "%Y-%m-%d %H:%M:%S%.f%#z") { + Ok(val) => val, + Err(_) => { + DateTime::::parse_from_str(str, "%Y-%m-%d %H:%M:%S%.f%:z")? + } + }; + let utc: DateTime = val.into(); + Value::String(utc.to_string()) + } + Type::TIMESTAMPTZ_ARRAY => { + match Converter::parse_array(str, |str| { + let utc: DateTime = + DateTime::::parse_from_str(str, "%Y-%m-%d %H:%M:%S%.f%#z")? + .into(); + Ok(Value::String(utc.to_string())) + }) { + Ok(val) => val, + Err(_) => Converter::parse_array(str, |str| { + let utc: DateTime = DateTime::::parse_from_str( + str, + "%Y-%m-%d %H:%M:%S%.f%#z", + )? + .into(); + Ok(Value::String(utc.to_string())) + })?, + } + } + Type::UUID => Value::String(Uuid::parse_str(str)?.to_string()), + Type::UUID_ARRAY => Converter::parse_array(str, |str| { + Ok(Value::String(Uuid::parse_str(str)?.to_string())) + })?, + Type::JSON | Type::JSONB => serde_json::from_str::(str)?, + Type::JSON_ARRAY | Type::JSONB_ARRAY => Converter::parse_array(str, |str| { + Ok(serde_json::from_str::(str)?) 
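A rough usage sketch of the converter above, with inputs in PostgreSQL's text output format (illustrative assertions only, assuming the match arms shown here):

use rust_postgres::types::Type;
use serde_json::{json, Value};

fn converter_examples() {
    assert_eq!(Converter::try_from_str(Some(Type::INT4), "42").unwrap(), json!(42));
    assert_eq!(Converter::try_from_str(Some(Type::BOOL), "t").unwrap(), Value::Bool(true));
    // Missing or unrecognised type information falls back to a plain JSON string.
    assert_eq!(Converter::try_from_str(None, "anything").unwrap(), json!("anything"));
}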
+ })?, + Type::OID => Value::Number(convert_into(str.parse::()?)), + Type::OID_ARRAY => Converter::parse_array(str, |str| { + Ok(Value::Number(convert_into(str.parse::()?))) + })?, + _ => Value::String(str.to_string()), + }; + + Ok(value) + } + + fn parse_array

(str: &str, mut parse: P) -> Result + where + P: FnMut(&str) -> Result, + { + if str.len() < 2 { + return Err(ArrayParseError::InputTooShort.into()); + } + + if !str.starts_with('{') || !str.ends_with('}') { + return Err(ArrayParseError::MissingBraces.into()); + } + + let mut res = vec![]; + let str = &str[1..(str.len() - 1)]; + let mut val_str = String::with_capacity(10); + let mut in_quotes = false; + let mut in_escape = false; + let mut chars = str.chars(); + let mut done = str.is_empty(); + + while !done { + loop { + match chars.next() { + Some(c) => match c { + c if in_escape => { + val_str.push(c); + in_escape = false; + } + '"' => in_quotes = !in_quotes, + '\\' => in_escape = true, + ',' if !in_quotes => { + break; + } + c => { + val_str.push(c); + } + }, + None => { + done = true; + break; + } + } + } + let val = if val_str.to_lowercase() == "null" { + Value::Null + } else { + parse(&val_str)? + }; + res.push(val); + val_str.clear(); + } + let arr = Value::Array(res); + Ok(arr) + } +} diff --git a/backend/windmill-api/src/postgres_triggers/handler.rs b/backend/windmill-api/src/postgres_triggers/handler.rs new file mode 100644 index 0000000000000..fc6adbb6e279b --- /dev/null +++ b/backend/windmill-api/src/postgres_triggers/handler.rs @@ -0,0 +1,1458 @@ +use std::{ + collections::{ + hash_map::Entry::{Occupied, Vacant}, + HashMap, + }, + str::FromStr, +}; + +use crate::{ + db::{ApiAuthed, DB}, + postgres_triggers::mapper::{Mapper, MappingInfo}, +}; +use axum::{ + extract::{Path, Query}, + Extension, Json, +}; +use chrono::Utc; +use http::StatusCode; +use itertools::Itertools; +use pg_escape::{quote_identifier, quote_literal}; +use quick_cache::sync::Cache; +use rand::Rng; +use rust_postgres::types::Type; +use serde::{Deserialize, Deserializer, Serialize}; +use sql_builder::{bind::Bind, SqlBuilder}; +use sqlx::{ + postgres::{types::Oid, PgConnectOptions, PgSslMode}, + Connection, FromRow, PgConnection, QueryBuilder, +}; +use windmill_audit::{audit_ee::audit_log, ActionKind}; +use windmill_common::error::Error; +use windmill_common::{ + db::UserDB, + error::{self, JsonResult}, + utils::{not_found_if_none, paginate, Pagination, StripPath}, + worker::CLOUD_HOSTED, +}; + +use super::get_database_resource; +use lazy_static::lazy_static; + +#[derive(FromRow, Serialize, Deserialize, Debug)] +pub struct Database { + pub user: String, + pub password: String, + pub host: String, + pub port: u16, + pub dbname: String, + pub sslmode: String, + pub root_certificate_pem: String, +} + +#[derive(Debug, Clone, FromRow, Serialize, Deserialize)] +pub struct TableToTrack { + pub table_name: String, + pub where_clause: Option, + pub columns_name: Vec, +} + +impl TableToTrack { + fn new( + table_name: String, + where_clause: Option, + columns_name: Vec, + ) -> TableToTrack { + TableToTrack { table_name, where_clause, columns_name } + } +} + +lazy_static! 
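The array parser above walks PostgreSQL's brace-delimited array literals, honouring double quotes, backslash escapes and NULL elements. An illustrative input/output pair (hypothetical, reusing the converter from earlier):

use rust_postgres::types::Type;

fn array_example() {
    let parsed =
        Converter::try_from_str(Some(Type::TEXT_ARRAY), r#"{"a,b",NULL,"c\"d"}"#).unwrap();
    // The quoted first element keeps its comma, NULL becomes JSON null,
    // and the backslash-escaped quote in the last element is preserved.
    assert_eq!(parsed, serde_json::json!(["a,b", null, "c\"d"]));
}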
{ + pub static ref TEMPLATE: Cache = Cache::new(50); +} + +#[derive(Debug, Clone, FromRow, Serialize, Deserialize)] +pub struct Relations { + pub schema_name: String, + pub table_to_track: Vec, +} + +impl Relations { + fn new(schema_name: String, table_to_track: Vec) -> Relations { + Relations { schema_name, table_to_track } + } + + fn add_new_table(&mut self, table_to_track: TableToTrack) { + self.table_to_track.push(table_to_track); + } +} + +#[derive(Deserialize)] +pub struct EditPostgresTrigger { + replication_slot_name: String, + publication_name: String, + path: String, + script_path: String, + is_flow: bool, + postgres_resource_path: String, + publication: Option, +} + +#[derive(Deserialize, Serialize, Debug)] + +pub struct NewPostgresTrigger { + path: String, + script_path: String, + is_flow: bool, + enabled: bool, + postgres_resource_path: String, + replication_slot_name: Option, + publication_name: Option, + publication: Option, +} + +pub async fn get_database_connection( + authed: ApiAuthed, + user_db: Option, + db: &DB, + postgres_resource_path: &str, + w_id: &str, +) -> Result { + let database = get_database_resource(authed, user_db, db, postgres_resource_path, w_id).await?; + + Ok(get_raw_postgres_connection(&database).await?) +} + +pub async fn get_raw_postgres_connection(db: &Database) -> Result { + let options = { + let sslmode = if !db.sslmode.is_empty() { + PgSslMode::from_str(&db.sslmode)? + } else { + PgSslMode::Prefer + }; + let options = PgConnectOptions::new() + .host(&db.host) + .database(&db.dbname) + .port(db.port) + .ssl_mode(sslmode) + .username(&db.user); + + let options = if !db.root_certificate_pem.is_empty() { + options.ssl_root_cert_from_pem(db.root_certificate_pem.as_bytes().to_vec()) + } else { + options + }; + + if !db.password.is_empty() { + options.password(&db.password) + } else { + options + } + }; + + PgConnection::connect_with(&options) + .await + .map_err(Error::SqlErr) +} + +#[derive(Deserialize, Debug)] +pub enum Language { + #[serde(rename = "typescript", alias = "Typescript")] + Typescript, +} + +#[derive(Debug, Deserialize)] +pub struct TemplateScript { + postgres_resource_path: String, + #[serde(deserialize_with = "check_if_not_duplication_relation")] + relations: Option>, + language: Language, +} + +fn check_if_not_duplication_relation<'de, D>( + relations: D, +) -> std::result::Result>, D::Error> +where + D: Deserializer<'de>, +{ + let relations: Option> = Option::deserialize(relations)?; + + match relations { + Some(relations) => { + for relation in relations.iter() { + if relation.schema_name.is_empty() { + return Err(serde::de::Error::custom( + "Schema Name must not be empty".to_string(), + )); + } + + for table_to_track in relation.table_to_track.iter() { + if table_to_track.table_name.trim().is_empty() { + return Err(serde::de::Error::custom( + "Table name must not be empty".to_string(), + )); + } + } + } + + if !relations + .iter() + .map(|relation| relation.schema_name.as_str()) + .all_unique() + { + return Err(serde::de::Error::custom( + "You cannot choose a schema more than one time".to_string(), + )); + } + + Ok(Some(relations)) + } + None => Ok(None), + } +} + +#[derive(FromRow, Deserialize, Serialize, Debug)] +pub struct PostgresTrigger { + pub path: String, + pub script_path: String, + pub is_flow: bool, + pub workspace_id: String, + pub edited_by: String, + pub email: String, + pub edited_at: chrono::DateTime, + pub extra_perms: Option, + pub postgres_resource_path: String, + pub error: Option, + pub server_id: Option, + pub 
replication_slot_name: String, + pub publication_name: String, + pub last_server_ping: Option>, + pub enabled: bool, +} + +#[derive(Deserialize, Serialize)] +pub struct ListPostgresTriggerQuery { + pub page: Option, + pub per_page: Option, + pub path: Option, + pub is_flow: Option, + pub path_start: Option, +} + +#[derive(Deserialize)] +pub struct SetEnabled { + pub enabled: bool, +} + +pub async fn create_postgres_trigger( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path(w_id): Path, + Json(new_postgres_trigger): Json, +) -> error::Result<(StatusCode, String)> { + let NewPostgresTrigger { + postgres_resource_path, + path, + script_path, + enabled, + is_flow, + publication_name, + replication_slot_name, + publication, + } = new_postgres_trigger; + + if *CLOUD_HOSTED { + return Err(error::Error::BadRequest( + "Postgres triggers are not supported on multi-tenant cloud, use dedicated cloud or self-host".to_string(), + )); + } + + if publication_name.is_none() && publication.is_none() { + return Err(error::Error::BadRequest( + "Publication data is missing".to_string(), + )); + } + + let create_slot = replication_slot_name.is_none(); + let create_publication = publication_name.is_none(); + + let name; + let mut pub_name = publication_name.as_deref().unwrap_or_default(); + let mut slot_name = replication_slot_name.as_deref().unwrap_or_default(); + if create_publication || create_slot { + let generate_random_string = move || { + let timestamp = Utc::now().timestamp_millis().to_string(); + let mut rng = rand::thread_rng(); + let charset = "abcdefghijklmnopqrstuvwxyz0123456789"; + + let random_part = (0..10) + .map(|_| { + charset + .chars() + .nth(rng.gen_range(0..charset.len())) + .unwrap() + }) + .collect::(); + + format!("{}_{}", timestamp, random_part) + }; + + name = format!("windmill_{}", generate_random_string()); + pub_name = &name; + slot_name = &name; + let publication = publication.unwrap(); + + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + + new_publication( + &mut connection, + pub_name, + publication.table_to_track.as_deref(), + &publication + .transaction_to_track + .iter() + .map(AsRef::as_ref) + .collect_vec(), + ) + .await?; + + new_slot(&mut connection, slot_name).await?; + } + + let mut tx = user_db.begin(&authed).await?; + + sqlx::query!( + r#" + INSERT INTO postgres_trigger ( + publication_name, + replication_slot_name, + workspace_id, + path, + script_path, + is_flow, + email, + enabled, + postgres_resource_path, + edited_by + ) + VALUES ( + $1, + $2, + $3, + $4, + $5, + $6, + $7, + $8, + $9, + $10 + )"#, + pub_name, + slot_name, + &w_id, + &path, + script_path, + is_flow, + &authed.email, + enabled, + postgres_resource_path, + &authed.username + ) + .execute(&mut *tx) + .await?; + + audit_log( + &mut *tx, + &authed, + "postgres_triggers.create", + ActionKind::Create, + &w_id, + Some(path.as_str()), + None, + ) + .await?; + + tx.commit().await?; + + Ok((StatusCode::CREATED, path.to_string())) +} + +pub async fn list_postgres_triggers( + authed: ApiAuthed, + Extension(user_db): Extension, + Path(w_id): Path, + Query(lst): Query, +) -> error::JsonResult> { + let mut tx = user_db.begin(&authed).await?; + let (per_page, offset) = paginate(Pagination { per_page: lst.per_page, page: lst.page }); + let mut sqlb = SqlBuilder::select_from("postgres_trigger") + .fields(&[ + "workspace_id", + "path", + "script_path", + "is_flow", + "edited_by", 
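For reference, a request body accepted by the create handler above (wired to the /create route of the postgres_triggers service) could look roughly like this; field names follow NewPostgresTrigger and PublicationData, and every value is hypothetical:

let body = serde_json::json!({
    "path": "f/examples/orders_trigger",
    "script_path": "f/examples/handle_order_change",
    "is_flow": false,
    "enabled": true,
    "postgres_resource_path": "u/admin/my_postgres",
    // Leaving both names null lets the handler create a publication and a slot
    // sharing a generated "windmill_{timestamp}_{random}" name.
    "replication_slot_name": null,
    "publication_name": null,
    "publication": {
        "transaction_to_track": ["insert", "update", "delete"],
        "table_to_track": [{
            "schema_name": "public",
            "table_to_track": [{
                "table_name": "orders",
                "columns_name": ["id", "status"],
                "where_clause": null
            }]
        }]
    }
});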
+ "email", + "edited_at", + "server_id", + "last_server_ping", + "extra_perms", + "error", + "enabled", + "postgres_resource_path", + "replication_slot_name", + "publication_name", + ]) + .order_by("edited_at", true) + .and_where("workspace_id = ?".bind(&w_id)) + .offset(offset) + .limit(per_page) + .clone(); + if let Some(path) = lst.path { + sqlb.and_where_eq("script_path", "?".bind(&path)); + } + if let Some(is_flow) = lst.is_flow { + sqlb.and_where_eq("is_flow", "?".bind(&is_flow)); + } + if let Some(path_start) = &lst.path_start { + sqlb.and_where_like_left("path", path_start); + } + let sql = sqlb + .sql() + .map_err(|e| error::Error::InternalErr(e.to_string()))?; + let rows = sqlx::query_as::<_, PostgresTrigger>(&sql) + .fetch_all(&mut *tx) + .await + .map_err(|e| { + tracing::debug!("Error fetching postgres_trigger: {:#?}", e); + windmill_common::error::Error::InternalErr("server error".to_string()) + })?; + tx.commit().await.map_err(|e| { + tracing::debug!("Error commiting postgres_trigger: {:#?}", e); + windmill_common::error::Error::InternalErr("server error".to_string()) + })?; + + Ok(Json(rows)) +} + +#[derive(Deserialize, Serialize, Debug)] +pub struct PublicationData { + #[serde(default, deserialize_with = "check_if_not_duplication_relation")] + table_to_track: Option>, + #[serde(deserialize_with = "check_if_valid_transaction_type")] + transaction_to_track: Vec, +} + +fn check_if_valid_transaction_type<'de, D>( + transaction_type: D, +) -> std::result::Result, D::Error> +where + D: Deserializer<'de>, +{ + let mut transaction_type: Vec = Vec::deserialize(transaction_type)?; + if transaction_type.len() > 3 { + return Err(serde::de::Error::custom( + "More than 3 transaction type which is not authorized, you are only allowed to those 3 transaction types: Insert, Update and Delete" + .to_string(), + )); + } + transaction_type.sort_unstable(); + transaction_type.dedup(); + + for transaction in transaction_type.iter() { + match transaction.to_lowercase().as_ref() { + "insert" => {}, + "update" => {}, + "delete" => {}, + _ => { + return Err(serde::de::Error::custom( + "Only the following transaction types are allowed: Insert, Update and Delete (case insensitive)" + .to_string(), + )) + } + } + } + + Ok(transaction_type) +} + +impl PublicationData { + fn new( + table_to_track: Option>, + transaction_to_track: Vec, + ) -> PublicationData { + PublicationData { table_to_track, transaction_to_track } + } +} + +#[derive(Debug, Serialize)] +pub struct SlotList { + slot_name: Option, + active: Option, +} + +pub async fn list_slot_name( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, postgres_resource_path)): Path<(String, String)>, +) -> error::Result>> { + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + + let slots = sqlx::query_as!( + SlotList, + r#" + SELECT + slot_name, + active + FROM + pg_replication_slots + WHERE + plugin = 'pgoutput' AND + slot_type = 'logical'; + "# + ) + .fetch_all(&mut connection) + .await?; + + Ok(Json(slots)) +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Slot { + name: String, +} + +async fn new_slot(connection: &mut PgConnection, name: &str) -> error::Result<()> { + let query = format!( + r#" + SELECT + * + FROM + pg_create_logical_replication_slot({}, 'pgoutput');"#, + quote_literal(&name) + ); + + sqlx::query(&query).execute(connection).await?; + + Ok(()) +} + +pub async fn create_slot( + authed: 
ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, postgres_resource_path)): Path<(String, String)>, + Json(Slot { name }): Json, +) -> error::Result { + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + + new_slot(&mut connection, &name).await?; + + Ok(format!("Slot {} created!", name)) +} + +pub async fn drop_slot_name( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, postgres_resource_path)): Path<(String, String)>, + Json(Slot { name }): Json, +) -> error::Result { + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + + let query = format!("SELECT pg_drop_replication_slot({});", quote_literal(&name)); + sqlx::query(&query).execute(&mut connection).await?; + + Ok(format!("Slot name {} deleted!", name)) +} +#[derive(Debug, Serialize)] +struct PublicationName { + publication_name: String, +} + +pub async fn list_database_publication( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, postgres_resource_path)): Path<(String, String)>, +) -> error::Result>> { + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + + let publication_names = sqlx::query_as!( + PublicationName, + "SELECT pubname AS publication_name FROM pg_publication;" + ) + .fetch_all(&mut connection) + .await?; + + let publications = publication_names + .iter() + .map(|publication| publication.publication_name.to_owned()) + .collect_vec(); + + Ok(Json(publications)) +} + +pub async fn get_publication_info( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, publication_name, postgres_resource_path)): Path<(String, String, String)>, +) -> error::Result> { + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + + let publication_data = + get_publication_scope_and_transaction(&publication_name, &mut connection).await; + + let (all_table, transaction_to_track) = match publication_data { + Ok(pub_data) => pub_data, + Err(Error::SqlErr(sqlx::Error::RowNotFound)) => { + return Err(Error::NotFound( + "Publication was not found, please create a new publication".to_string(), + )) + } + Err(e) => return Err(e), + }; + + let table_to_track = if !all_table { + Some(get_tracked_relations(&mut connection, &publication_name).await?) 
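To make the slot management above concrete: new_slot and drop_slot_name boil down to the following statements (a sketch; the actual strings differ only in whitespace, and quote_literal supplies the single quotes):

#[test]
fn slot_management_statements() {
    let create = format!(
        "SELECT * FROM pg_create_logical_replication_slot({}, 'pgoutput');",
        pg_escape::quote_literal("my_slot")
    );
    let drop = format!(
        "SELECT pg_drop_replication_slot({});",
        pg_escape::quote_literal("my_slot")
    );
    assert_eq!(
        create,
        "SELECT * FROM pg_create_logical_replication_slot('my_slot', 'pgoutput');"
    );
    assert_eq!(drop, "SELECT pg_drop_replication_slot('my_slot');");
}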
+ } else { + None + }; + Ok(Json(PublicationData::new( + table_to_track, + transaction_to_track, + ))) +} + +async fn new_publication( + connection: &mut PgConnection, + publication_name: &str, + table_to_track: Option<&[Relations]>, + transaction_to_track: &[&str], +) -> Result<(), Error> { + let mut query = QueryBuilder::new("CREATE PUBLICATION "); + + query.push(quote_identifier(publication_name)); + + match table_to_track { + Some(database_component) if !database_component.is_empty() => { + query.push(" FOR"); + for (i, schema) in database_component.iter().enumerate() { + if schema.table_to_track.is_empty() { + query.push(" TABLES IN SCHEMA "); + query.push(quote_identifier(&schema.schema_name)); + } else { + query.push(" TABLE ONLY "); + for (j, table) in schema.table_to_track.iter().enumerate() { + let table_name = quote_identifier(&table.table_name); + let schema_name = quote_identifier(&schema.schema_name); + let full_name = format!("{}.{}", &schema_name, &table_name); + query.push(full_name); + if !table.columns_name.is_empty() { + query.push(" ("); + let columns = table + .columns_name + .iter() + .map(|column| quote_identifier(column)) + .join(", "); + query.push(&columns); + query.push(")"); + } + + if let Some(where_clause) = &table.where_clause { + query.push(" WHERE ("); + query.push(where_clause); + query.push(')'); + } + + if j + 1 != schema.table_to_track.len() { + query.push(", "); + } + } + } + if i < database_component.len() - 1 { + query.push(", "); + } + } + } + _ => { + query.push(" FOR ALL TABLES "); + } + }; + + if !transaction_to_track.is_empty() { + let transactions = || transaction_to_track.iter().join(", "); + query.push(" WITH (publish = '"); + query.push(transactions()); + query.push("');"); + } + + let query = query.build(); + query.execute(&mut *connection).await?; + + Ok(()) +} + +pub async fn create_publication( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, publication_name, postgres_resource_path)): Path<(String, String, String)>, + Json(publication_data): Json, +) -> error::Result { + let PublicationData { table_to_track, transaction_to_track } = publication_data; + + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + + new_publication( + &mut connection, + &publication_name, + table_to_track.as_deref(), + &transaction_to_track.iter().map(AsRef::as_ref).collect_vec(), + ) + .await?; + + Ok(format!( + "Publication {} successfully created!", + publication_name + )) +} + +async fn drop_publication( + publication_name: &str, + connection: &mut PgConnection, +) -> Result<(), Error> { + let mut query = QueryBuilder::new("DROP PUBLICATION IF EXISTS "); + let quoted_publication_name = quote_identifier(publication_name); + query.push(quoted_publication_name); + query.push(";"); + query.build().execute(&mut *connection).await?; + Ok(()) +} + +pub async fn delete_publication( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, publication_name, postgres_resource_path)): Path<(String, String, String)>, +) -> error::Result { + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + + drop_publication(&publication_name, &mut connection).await?; + + Ok(format!( + "Publication {} successfully deleted!", + publication_name + )) +} + +async fn update_publication( + connection: &mut 
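As a concrete illustration of the statement new_publication above assembles, one schema with a single column-filtered table and two tracked operations yields roughly the following (hypothetical input; identifier quoting is added by quote_identifier only where PostgreSQL requires it):

// Input:
//   publication_name     = "my_pub"
//   table_to_track       = [Relations { schema_name: "public", table_to_track: [
//                              TableToTrack { table_name: "orders",
//                                             columns_name: ["id", "status"],
//                                             where_clause: Some("status != 'draft'") }]}]
//   transaction_to_track = ["insert", "update"]
//
// Generated statement (modulo whitespace):
//   CREATE PUBLICATION my_pub FOR TABLE ONLY public.orders (id, status)
//     WHERE (status != 'draft') WITH (publish = 'insert, update');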
PgConnection, + publication_name: &str, + PublicationData { table_to_track, transaction_to_track }: PublicationData, +) -> error::Result { + let (all_table, _) = + get_publication_scope_and_transaction(&publication_name, connection).await?; + + let mut query = QueryBuilder::new(""); + let quoted_publication_name = quote_identifier(&publication_name); + + let transaction_to_track_as_str = transaction_to_track.iter().join(","); + + match table_to_track { + Some(ref relations) if !relations.is_empty() => { + if all_table { + drop_publication(&publication_name, connection).await?; + new_publication( + connection, + &publication_name, + table_to_track.as_deref(), + &transaction_to_track.iter().map(AsRef::as_ref).collect_vec(), + ) + .await?; + } else { + query.push("ALTER PUBLICATION "); + query.push("ed_publication_name); + query.push(" SET"); + for (i, schema) in relations.iter().enumerate() { + if schema.table_to_track.is_empty() { + query.push(" TABLES IN SCHEMA "); + let quoted_schema = quote_identifier(&schema.schema_name); + query.push("ed_schema); + } else { + query.push(" TABLE ONLY "); + for (j, table) in schema.table_to_track.iter().enumerate() { + let table_name = quote_identifier(&table.table_name); + let schema_name = quote_identifier(&schema.schema_name); + let full_name = format!("{}.{}", &schema_name, &table_name); + query.push(&full_name); + if !table.columns_name.is_empty() { + query.push(" ("); + let columns = table + .columns_name + .iter() + .map(|column| quote_identifier(column)) + .join(", "); + query.push(&columns); + query.push(") "); + } + + if let Some(where_clause) = &table.where_clause { + query.push(" WHERE ("); + query.push(where_clause); + query.push(')'); + } + + if j + 1 != schema.table_to_track.len() { + query.push(", "); + } + } + } + if i < relations.len() - 1 { + query.push(','); + } + } + query.push(";"); + query.build().execute(&mut *connection).await?; + query.reset(); + query.push("ALTER PUBLICATION "); + query.push("ed_publication_name); + query.push(format!( + " SET (publish = '{}');", + transaction_to_track_as_str + )); + } + } + _ => { + drop_publication(&publication_name, connection).await?; + let to_execute = format!( + r#" + CREATE + PUBLICATION {} FOR ALL TABLES WITH (publish = '{}') + "#, + quoted_publication_name, transaction_to_track_as_str + ); + query.push(&to_execute); + } + }; + + query.build().execute(&mut *connection).await?; + + Ok(format!( + "Publication {} successfully updated!", + publication_name + )) +} + +pub async fn alter_publication( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, publication_name, postgres_resource_path)): Path<(String, String, String)>, + Json(publication_data): Json, +) -> error::Result { + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + let message = update_publication(&mut connection, &publication_name, publication_data).await?; + + Ok(message) +} + +async fn get_publication_scope_and_transaction( + publication_name: &str, + connection: &mut PgConnection, +) -> Result<(bool, Vec), Error> { + #[derive(Debug, Deserialize, FromRow)] + struct PublicationTransaction { + all_table: bool, + insert: bool, + update: bool, + delete: bool, + } + + let transaction = sqlx::query_as!( + PublicationTransaction, + r#" + SELECT + puballtables AS all_table, + pubinsert AS insert, + pubupdate AS update, + pubdelete AS delete + FROM + pg_publication + WHERE + pubname = $1 + "#, 
+ publication_name + ) + .fetch_one(&mut *connection) + .await?; + + let mut transaction_to_track = Vec::with_capacity(3); + + if transaction.insert { + transaction_to_track.push("insert".to_string()); + } + if transaction.update { + transaction_to_track.push("update".to_string()); + } + if transaction.delete { + transaction_to_track.push("delete".to_string()); + } + + Ok((transaction.all_table, transaction_to_track)) +} + +async fn get_tracked_relations( + connection: &mut PgConnection, + publication_name: &str, +) -> error::Result> { + #[derive(Debug, Deserialize, FromRow)] + struct PublicationData { + schema_name: Option, + table_name: Option, + columns: Option>, + where_clause: Option, + } + + let publications = sqlx::query_as!( + PublicationData, + r#" + SELECT + schemaname AS schema_name, + tablename AS table_name, + attnames AS columns, + rowfilter AS where_clause + FROM + pg_publication_tables + WHERE + pubname = $1 + "#, + publication_name + ) + .fetch_all(&mut *connection) + .await?; + + let mut table_to_track: HashMap = HashMap::new(); + + for publication in publications { + let schema_name = publication.schema_name.unwrap(); + let entry = table_to_track.entry(schema_name.clone()); + let table_to_track = TableToTrack::new( + publication.table_name.unwrap(), + publication.where_clause, + publication.columns.unwrap(), + ); + match entry { + Occupied(mut occuped) => { + occuped.get_mut().add_new_table(table_to_track); + } + Vacant(vacant) => { + vacant.insert(Relations::new(schema_name, vec![table_to_track])); + } + } + } + Ok(table_to_track.into_values().collect_vec()) +} + +pub async fn get_postgres_trigger( + authed: ApiAuthed, + Extension(user_db): Extension, + Path((w_id, path)): Path<(String, StripPath)>, +) -> JsonResult { + let mut tx = user_db.begin(&authed).await?; + let path = path.to_path(); + let trigger = sqlx::query_as!( + PostgresTrigger, + r#" + SELECT + workspace_id, + path, + script_path, + is_flow, + edited_by, + email, + edited_at, + server_id, + last_server_ping, + extra_perms, + error, + enabled, + replication_slot_name, + publication_name, + postgres_resource_path + FROM + postgres_trigger + WHERE + workspace_id = $1 AND + path = $2 + "#, + &w_id, + &path + ) + .fetch_optional(&mut *tx) + .await?; + tx.commit().await?; + + let trigger = not_found_if_none(trigger, "Trigger", path)?; + + Ok(Json(trigger)) +} + +pub async fn update_postgres_trigger( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, path)): Path<(String, StripPath)>, + Json(postgres_trigger): Json, +) -> error::Result { + let workspace_path = path.to_path(); + let EditPostgresTrigger { + replication_slot_name, + publication_name, + script_path, + path, + is_flow, + postgres_resource_path, + publication, + } = postgres_trigger; + + if let Some(publication) = publication { + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + update_publication(&mut connection, &publication_name, publication).await?; + } + let mut tx = user_db.begin(&authed).await?; + + sqlx::query!( + r#" + UPDATE postgres_trigger + SET + script_path = $1, + path = $2, + is_flow = $3, + edited_by = $4, + email = $5, + postgres_resource_path = $6, + replication_slot_name = $7, + publication_name = $8, + edited_at = now(), + error = NULL, + server_id = NULL + WHERE + workspace_id = $9 AND + path = $10 + "#, + script_path, + path, + is_flow, + &authed.username, + &authed.email, + 
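get_tracked_relations above folds the per-table rows of pg_publication_tables back into the Relations shape used by the rest of this module, grouping tables by schema. For instance (hypothetical rows):

// Rows for pubname = 'my_pub':
//   schema_name | table_name | columns      | where_clause
//   public      | orders     | {id,status}  | NULL
//   public      | customers  | {id,email}   | NULL
//
// Result: a single Relations { schema_name: "public", .. } entry whose
// table_to_track holds both tables, rather than one entry per row.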
postgres_resource_path, + replication_slot_name, + publication_name, + w_id, + workspace_path, + ) + .execute(&mut *tx) + .await?; + + audit_log( + &mut *tx, + &authed, + "postgres_triggers.update", + ActionKind::Create, + &w_id, + Some(&path), + None, + ) + .await?; + + tx.commit().await?; + + Ok(workspace_path.to_string()) +} + +pub async fn delete_postgres_trigger( + authed: ApiAuthed, + Extension(user_db): Extension, + Path((w_id, path)): Path<(String, StripPath)>, +) -> error::Result { + let path = path.to_path(); + let mut tx = user_db.begin(&authed).await?; + sqlx::query!( + r#" + DELETE FROM postgres_trigger + WHERE + workspace_id = $1 AND + path = $2 + "#, + w_id, + path, + ) + .execute(&mut *tx) + .await?; + + audit_log( + &mut *tx, + &authed, + "postgres_triggers.delete", + ActionKind::Delete, + &w_id, + Some(path), + None, + ) + .await?; + + tx.commit().await?; + + Ok(format!("Postgres trigger {path} deleted")) +} + +pub async fn exists_postgres_trigger( + Extension(db): Extension, + Path((w_id, path)): Path<(String, StripPath)>, +) -> JsonResult { + let path = path.to_path(); + let exists = sqlx::query_scalar!( + r#" + SELECT EXISTS( + SELECT 1 + FROM postgres_trigger + WHERE + path = $1 AND + workspace_id = $2 + )"#, + path, + w_id, + ) + .fetch_one(&db) + .await? + .unwrap_or(false); + Ok(Json(exists)) +} + +pub async fn set_enabled( + authed: ApiAuthed, + Extension(user_db): Extension, + Path((w_id, path)): Path<(String, StripPath)>, + Json(payload): Json, +) -> error::Result { + let mut tx = user_db.begin(&authed).await?; + let path = path.to_path(); + + // important to set server_id, last_server_ping and error to NULL to stop current postgres listener + let one_o = sqlx::query_scalar!( + r#" + UPDATE postgres_trigger + SET + enabled = $1, + email = $2, + edited_by = $3, + edited_at = now(), + server_id = NULL, + error = NULL + WHERE + path = $4 AND + workspace_id = $5 + RETURNING 1 + "#, + payload.enabled, + &authed.email, + &authed.username, + path, + w_id, + ) + .fetch_optional(&mut *tx) + .await? 
+ .flatten(); + + not_found_if_none(one_o, "Postgres trigger", path)?; + + audit_log( + &mut *tx, + &authed, + "postgres_triggers.setenabled", + ActionKind::Update, + &w_id, + Some(path), + Some([("enabled", payload.enabled.to_string().as_ref())].into()), + ) + .await?; + + tx.commit().await?; + + Ok(format!( + "succesfully updated postgres trigger at path {} to status {}", + path, payload.enabled + )) +} + +pub async fn get_template_script(Path((_, id)): Path<(String, String)>) -> error::Result { + let template = if let Some((_, template)) = TEMPLATE.remove(&id) { + template + } else { + "".to_string() + }; + Ok(template) +} + +pub async fn create_template_script( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path(w_id): Path, + Json(template_script): Json, +) -> error::Result { + let TemplateScript { postgres_resource_path, relations, language } = template_script; + if relations.is_none() { + return Err(Error::BadRequest( + "You must at least choose schema to fetch table from".to_string(), + )); + } + + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + + #[derive(Debug, FromRow, Deserialize)] + struct ColumnInfo { + table_schema: Option, + table_name: Option, + column_name: Option, + oid: Oid, + is_nullable: bool, + } + + let relations = relations.unwrap(); + let mut schema_or_fully_qualified_name = Vec::with_capacity(relations.len()); + let mut columns_list = Vec::new(); + for relation in relations { + if !relation.table_to_track.is_empty() { + for table_to_track in relation.table_to_track { + let fully_qualified_name = + format!("{}.{}", &relation.schema_name, table_to_track.table_name); + schema_or_fully_qualified_name.push(quote_literal(&fully_qualified_name)); + + let columns = if !table_to_track.columns_name.is_empty() { + quote_literal(&table_to_track.columns_name.join(",")) + } else { + "''".to_string() + }; + columns_list.push(columns); + } + continue; + } + + schema_or_fully_qualified_name.push(quote_literal(&relation.schema_name)); + columns_list.push(String::from("''")); + } + + let tables_name = schema_or_fully_qualified_name.join(","); + let columns_list = columns_list.join(","); + + let query = format!( + r#" + WITH table_column_mapping AS ( + SELECT + unnest(ARRAY[{}]) AS table_name, + unnest(ARRAY[{}]) AS column_list + ), + parsed_columns AS ( + SELECT + tcm.table_name, + CASE + WHEN tcm.column_list = '' THEN NULL + ELSE string_to_array(tcm.column_list, ',') + END AS columns + FROM + table_column_mapping tcm + ) + SELECT + ns.nspname AS table_schema, + cls.relname AS table_name, + attr.attname AS column_name, + attr.atttypid AS oid, + attr.attnotnull AS is_nullable + FROM + pg_attribute attr + JOIN + pg_class cls + ON attr.attrelid = cls.oid + JOIN + pg_namespace ns + ON cls.relnamespace = ns.oid + JOIN + parsed_columns pc + ON ns.nspname || '.' 
|| cls.relname = pc.table_name + OR ns.nspname = pc.table_name + WHERE + attr.attnum > 0 -- Exclude system columns + AND NOT attr.attisdropped -- Exclude dropped columns + AND cls.relkind = 'r' -- Restrict to base tables + AND ( + pc.columns IS NULL + OR attr.attname = ANY(pc.columns) + ); + "#, + tables_name, columns_list + ); + + let rows: Vec = sqlx::query_as(&query) + .fetch_all(&mut connection) + .await + .map_err(error::Error::SqlErr)?; + + let mut mapper: HashMap>> = HashMap::new(); + + for row in rows { + let ColumnInfo { table_schema, table_name, column_name, oid, is_nullable } = row; + + let entry = mapper.entry(table_schema.unwrap()); + + let mapped_info = + MappingInfo::new(column_name.unwrap(), Type::from_oid(oid.0), is_nullable); + + match entry { + Occupied(mut occupied) => { + let entry = occupied.get_mut().entry(table_name.unwrap()); + match entry { + Occupied(mut occuped) => { + let mapping_info = occuped.get_mut(); + mapping_info.push(mapped_info); + } + Vacant(vacant) => { + let mut mapping_info = Vec::with_capacity(10); + mapping_info.push(mapped_info); + vacant.insert(mapping_info); + } + } + } + Vacant(vacant) => { + let mut mapping_info = Vec::with_capacity(10); + mapping_info.push(mapped_info); + vacant.insert(HashMap::from([(table_name.unwrap(), mapping_info)])); + } + } + } + + let mapper = Mapper::new(mapper, language); + + let create_template_id = |w_id: &str| -> String { + let uuid = uuid::Uuid::new_v4().to_string(); + let id = format!("{}-{}", &w_id, &uuid); + + id + }; + + let template = mapper.get_template(); + let id = create_template_id(&w_id); + + TEMPLATE.insert(id.clone(), template); + + Ok(id) +} + +pub async fn is_database_in_logical_level( + authed: ApiAuthed, + Extension(user_db): Extension, + Extension(db): Extension, + Path((w_id, postgres_resource_path)): Path<(String, String)>, +) -> error::JsonResult { + let mut connection = get_database_connection( + authed.clone(), + Some(user_db.clone()), + &db, + &postgres_resource_path, + &w_id, + ) + .await?; + + let wal_level = sqlx::query_scalar!("SHOW WAL_LEVEL;") + .fetch_optional(&mut connection) + .await? 
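To clarify the two unnest arrays fed into the introspection query above: a whole-schema entry contributes an empty column list (meaning all columns), while a column-restricted table contributes its fully qualified name and the comma-joined column names. Hypothetical values:

// relations = [Relations { schema_name: "sales", table_to_track: [] },
//              Relations { schema_name: "public", table_to_track: [
//                  TableToTrack { table_name: "orders",
//                                 columns_name: ["id", "status"], .. }]}]
//
// tables_name  = 'sales','public.orders'
// columns_list = '','id,status'
//
// Rows whose column_list is empty end up with parsed_columns.columns IS NULL,
// so every column of the matching tables is returned.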
+ .flatten(); + + let is_logical = match wal_level.as_deref() { + Some("logical") => true, + _ => false, + }; + + Ok(Json(is_logical)) +} diff --git a/backend/windmill-api/src/postgres_triggers/hex.rs b/backend/windmill-api/src/postgres_triggers/hex.rs new file mode 100644 index 0000000000000..2ada70dd3508f --- /dev/null +++ b/backend/windmill-api/src/postgres_triggers/hex.rs @@ -0,0 +1,45 @@ +use std::num::ParseIntError; + + +/** +* This implementation is inspired by Postgres replication functionality +* from https://github.com/supabase/pg_replicate +* +* Original implementation: +* - https://github.dev/supabase/pg_replicate/blob/main/pg_replicate/src/conversions/hex.rs +* +*/ + +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum ByteaHexParseError { + #[error("missing prefix '\\x'")] + InvalidPrefix, + + #[error("invalid byte")] + OddNumerOfDigits, + + #[error("parse int result: {0}")] + ParseInt(#[from] ParseIntError), +} + +pub fn from_bytea_hex(s: &str) -> Result, ByteaHexParseError> { + if s.len() < 2 || &s[..2] != "\\x" { + return Err(ByteaHexParseError::InvalidPrefix); + } + + let mut result = Vec::with_capacity((s.len() - 2) / 2); + let s = &s[2..]; + + if s.len() % 2 != 0 { + return Err(ByteaHexParseError::OddNumerOfDigits); + } + + for i in (0..s.len()).step_by(2) { + let val = u8::from_str_radix(&s[i..i + 2], 16)?; + result.push(val); + } + + Ok(result) +} diff --git a/backend/windmill-api/src/postgres_triggers/mapper.rs b/backend/windmill-api/src/postgres_triggers/mapper.rs new file mode 100644 index 0000000000000..ec3626c7f77a0 --- /dev/null +++ b/backend/windmill-api/src/postgres_triggers/mapper.rs @@ -0,0 +1,134 @@ +use std::collections::HashMap; + +use rust_postgres::types::Type; + +use super::handler::Language; + +fn postgres_to_typescript_type(postgres_type: Option) -> String { + let data_type = match postgres_type { + Some(postgres_type) => match postgres_type { + Type::BOOL => "boolean", + Type::BOOL_ARRAY => "Array", + Type::CHAR | Type::BPCHAR | Type::VARCHAR | Type::NAME | Type::TEXT => "string", + Type::CHAR_ARRAY + | Type::BPCHAR_ARRAY + | Type::VARCHAR_ARRAY + | Type::NAME_ARRAY + | Type::TEXT_ARRAY => "Array", + Type::INT2 | Type::INT4 | Type::INT8 | Type::NUMERIC => "number", + Type::INT2_ARRAY | Type::INT4_ARRAY | Type::INT8_ARRAY => "Array", + Type::FLOAT4 | Type::FLOAT8 => "number", + Type::FLOAT8_ARRAY | Type::FLOAT4_ARRAY => "Array", + Type::NUMERIC_ARRAY => "Array", + Type::BYTEA => "Array", + Type::BYTEA_ARRAY => "Array>", + Type::DATE => "string", + Type::DATE_ARRAY => "Array", + Type::TIME => "string", + Type::TIME_ARRAY => "Array", + Type::TIMESTAMPTZ | Type::TIMESTAMP => "Date", + Type::TIMESTAMPTZ_ARRAY | Type::TIMESTAMP_ARRAY => "Array", + Type::UUID => "string", + Type::UUID_ARRAY => "Array", + Type::JSON | Type::JSONB | Type::JSON_ARRAY | Type::JSONB_ARRAY => "unknown", + Type::OID => "number", + Type::OID_ARRAY => "Array", + _ => "string", + }, + None => "string", + }; + + data_type.to_string() +} + +fn into_body_struct(language: Language, mapped_info: Vec) -> String { + let mut block = String::new(); + match language { + Language::Typescript => { + block.push_str("{\r\n"); + for field in mapped_info { + let typescript_type = postgres_to_typescript_type(field.data_type); + let mut key = field.column_name; + if field.is_nullable { + key.push('?'); + } + let full_field = format!("\t\t{}: {},\r\n", key, typescript_type); + block.push_str(&full_field); + } + block.push_str("\t}"); + } + } + block +} + +#[derive(Debug)] +pub struct 
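PostgreSQL's text representation of bytea is the hex format handled by from_bytea_hex above: a backslash-x prefix followed by two hex digits per byte. A small illustrative check (hypothetical test):

#[test]
fn decodes_bytea_hex() {
    // "\x68656c6c6f" is the five bytes of "hello".
    assert_eq!(from_bytea_hex("\\x68656c6c6f").unwrap(), b"hello".to_vec());
    // Input without the "\x" prefix is rejected.
    assert!(from_bytea_hex("68656c6c6f").is_err());
}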
MappingInfo { + data_type: Option, + is_nullable: bool, + column_name: String, +} + +impl MappingInfo { + pub fn new(column_name: String, data_type: Option, is_nullable: bool) -> Self { + Self { column_name, data_type, is_nullable } + } +} + +pub struct Mapper { + to_template: HashMap>>, + language: Language, +} + +impl Mapper { + pub fn new( + to_template: HashMap>>, + language: Language, + ) -> Self { + Self { to_template, language } + } + + fn into_typescript_template(self) -> Vec { + let mut struct_definitions = Vec::new(); + for (_, mapping_info) in self.to_template { + let last_elem = mapping_info.len() - 1; + for (i, (_, mapped_info)) in mapping_info.into_iter().enumerate() { + let mut struct_body = into_body_struct(Language::Typescript, mapped_info); + let struct_body = if i != last_elem { + struct_body.push_str("\r\n"); + struct_body + } else { + struct_body + }; + struct_definitions.push(struct_body); + } + } + struct_definitions + } + + pub fn get_template(self) -> String { + let struct_definition = match self.language { + Language::Typescript => self.into_typescript_template(), + }; + + let struct_definition = if struct_definition.is_empty() { + "any".to_string() + } else { + struct_definition.join("\t| ") + }; + + format!( + r#" + + +export async function main( + transaction_type: "insert" | "update" | "delete", + schema_name: string, + table_name: string, + row: {} +) {{ +}} + "#, + struct_definition + ) + } +} diff --git a/backend/windmill-api/src/postgres_triggers/mod.rs b/backend/windmill-api/src/postgres_triggers/mod.rs new file mode 100644 index 0000000000000..9aa50f8fec0ea --- /dev/null +++ b/backend/windmill-api/src/postgres_triggers/mod.rs @@ -0,0 +1,155 @@ +use crate::{ + db::{ApiAuthed, DB}, + jobs::{run_flow_by_path_inner, run_script_by_path_inner, RunJobQuery}, + resources::get_resource_value_interpolated_internal, + users::fetch_api_authed, +}; +use serde_json::value::RawValue; +use std::collections::HashMap; + +use axum::{ + routing::{delete, get, post}, + Router, +}; +use handler::{ + alter_publication, create_postgres_trigger, create_publication, create_slot, + create_template_script, delete_postgres_trigger, delete_publication, drop_slot_name, + exists_postgres_trigger, get_postgres_trigger, get_publication_info, get_template_script, + is_database_in_logical_level, list_database_publication, list_postgres_triggers, + list_slot_name, set_enabled, update_postgres_trigger, Database, PostgresTrigger, +}; +use windmill_common::{db::UserDB, error::Error, utils::StripPath}; +use windmill_queue::PushArgsOwned; + +mod bool; +mod converter; +mod handler; +mod hex; +mod mapper; +mod relation; +mod replication_message; +mod trigger; + +pub use trigger::start_database; + +pub async fn get_database_resource( + authed: ApiAuthed, + user_db: Option, + db: &DB, + database_resource_path: &str, + w_id: &str, +) -> Result { + let resource = get_resource_value_interpolated_internal( + &authed, + user_db, + &db, + &w_id, + &database_resource_path, + None, + "", + ) + .await + .map_err(|_| Error::NotFound("Database resource do not exist".to_string()))?; + + let resource = match resource { + Some(resource) => serde_json::from_value::(resource).map_err(Error::SerdeJson)?, + None => { + return { + Err(Error::NotFound( + "Database resource do not exist".to_string(), + )) + } + } + }; + + Ok(resource) +} + +fn publication_service() -> Router { + Router::new() + .route("/get/:publication_name/*path", get(get_publication_info)) + .route("/create/:publication_name/*path", 
post(create_publication)) + .route("/update/:publication_name/*path", post(alter_publication)) + .route( + "/delete/:publication_name/*path", + delete(delete_publication), + ) + .route("/list/*path", get(list_database_publication)) +} + +fn slot_service() -> Router { + Router::new() + .route("/list/*path", get(list_slot_name)) + .route("/create/*path", post(create_slot)) + .route("/delete/*path", delete(drop_slot_name)) +} + +pub fn workspaced_service() -> Router { + Router::new() + .route("/create", post(create_postgres_trigger)) + .route("/list", get(list_postgres_triggers)) + .route("/get/*path", get(get_postgres_trigger)) + .route("/update/*path", post(update_postgres_trigger)) + .route("/delete/*path", delete(delete_postgres_trigger)) + .route("/exists/*path", get(exists_postgres_trigger)) + .route("/setenabled/*path", post(set_enabled)) + .route("/get_template_script/:id", get(get_template_script)) + .route("/create_template_script", post(create_template_script)) + .route( + "/is_valid_postgres_configuration/*path", + get(is_database_in_logical_level), + ) + .nest("/publication", publication_service()) + .nest("/slot", slot_service()) +} + +async fn run_job( + args: Option>>, + extra: Option>>, + db: &DB, + trigger: &PostgresTrigger, +) -> anyhow::Result<()> { + let args = PushArgsOwned { args: args.unwrap_or_default(), extra }; + let label_prefix = Some(format!("db-{}-", trigger.path)); + + let authed = fetch_api_authed( + trigger.edited_by.clone(), + trigger.email.clone(), + &trigger.workspace_id, + db, + Some("anonymous".to_string()), + ) + .await?; + + let user_db = UserDB::new(db.clone()); + + let run_query = RunJobQuery::default(); + + if trigger.is_flow { + run_flow_by_path_inner( + authed, + db.clone(), + user_db, + trigger.workspace_id.clone(), + StripPath(trigger.script_path.to_owned()), + run_query, + args, + label_prefix, + ) + .await?; + } else { + run_script_by_path_inner( + authed, + db.clone(), + user_db, + trigger.workspace_id.clone(), + StripPath(trigger.script_path.to_owned()), + run_query, + args, + label_prefix, + ) + .await?; + } + + Ok(()) +} diff --git a/backend/windmill-api/src/postgres_triggers/relation.rs b/backend/windmill-api/src/postgres_triggers/relation.rs new file mode 100644 index 0000000000000..f893f0ed5bc09 --- /dev/null +++ b/backend/windmill-api/src/postgres_triggers/relation.rs @@ -0,0 +1,74 @@ +use core::str; + +use serde_json::{Map, Value}; +use std::{collections::HashMap, str::Utf8Error}; + +use super::{ + converter::{Converter, ConverterError}, + replication_message::{Columns, RelationBody, TupleData}, +}; +use rust_postgres::types::Oid; +#[derive(Debug, thiserror::Error)] +pub enum RelationConversionError { + #[error("Could not find matching table")] + FailToFindMatchingTable, + + #[error("Binary data not supported")] + BinaryFormatNotSupported, + + #[error("decode error: {0}")] + FromBytes(#[from] ConverterError), + + #[error("invalid string value")] + InvalidStr(#[from] Utf8Error), +} + +pub struct RelationConverter(HashMap); + +impl RelationConverter { + pub fn new() -> Self { + Self(HashMap::new()) + } + + pub fn add_relation(&mut self, relation: RelationBody) { + self.0.insert(relation.o_id, relation); + } + + pub fn get_columns(&self, o_id: Oid) -> Result<&Columns, RelationConversionError> { + self.0 + .get(&o_id) + .map(|relation_body| &relation_body.columns) + .ok_or(RelationConversionError::FailToFindMatchingTable) + } + + pub fn get_relation(&self, o_id: Oid) -> Result<&RelationBody, RelationConversionError> { + self.0 + 
.get(&o_id) + .ok_or(RelationConversionError::FailToFindMatchingTable) + } + + pub fn body_to_json( + &self, + to_decode: (Oid, Vec), + ) -> Result, RelationConversionError> { + let (o_id, tuple_data) = to_decode; + let mut object: Map = Map::new(); + let columns = self.get_columns(o_id)?; + + for (i, column) in columns.iter().enumerate() { + let value = match &tuple_data[i] { + TupleData::Null | TupleData::UnchangedToast => Value::Null, + TupleData::Binary(_) => { + return Err(RelationConversionError::BinaryFormatNotSupported) + } + TupleData::Text(bytes) => { + let str = str::from_utf8(&bytes[..])?; + Converter::try_from_str(column.type_o_id.clone(), str)? + } + }; + + object.insert(column.name.clone(), value); + } + Ok(object) + } +} diff --git a/backend/windmill-api/src/postgres_triggers/replication_message.rs b/backend/windmill-api/src/postgres_triggers/replication_message.rs new file mode 100644 index 0000000000000..e33f4bbf867f3 --- /dev/null +++ b/backend/windmill-api/src/postgres_triggers/replication_message.rs @@ -0,0 +1,510 @@ +#![allow(unused)] + +use core::str; +use std::{ + cmp, + io::{self, Cursor, Read}, + str::Utf8Error, +}; + +use byteorder::{BigEndian, ReadBytesExt}; +use bytes::Bytes; +use rust_postgres::types::{Oid, Type}; +use thiserror::Error; + +use super::trigger::LogicalReplicationSettings; +const PRIMARY_KEEPALIVE_BYTE: u8 = b'k'; +const X_LOG_DATA_BYTE: u8 = b'w'; + +/** +* This implementation is inspired by Postgres replication functionality +* from https://github.com/supabase/pg_replicate +* +* Original implementation: +* - https://github.com/supabase/pg_replicate/blob/main/pg_replicate/src/conversions/cdc_event.rs +* +*/ + +#[derive(Debug)] +pub struct PrimaryKeepAliveBody { + pub wal_end: u64, + pub timestamp: i64, + pub reply: bool, +} + +impl PrimaryKeepAliveBody { + pub fn new(wal_end: u64, timestamp: i64, reply: bool) -> PrimaryKeepAliveBody { + PrimaryKeepAliveBody { wal_end, timestamp, reply } + } +} + +const BEGIN_BYTE: u8 = b'B'; +const COMMIT_BYTE: u8 = b'C'; +const ORIGIN_BYTE: u8 = b'O'; +const RELATION_BYTE: u8 = b'R'; +const TYPE_BYTE: u8 = b'Y'; +const INSERT_BYTE: u8 = b'I'; +const UPDATE_BYTE: u8 = b'U'; +const DELETE_BYTE: u8 = b'D'; +const TUPLE_NEW_BYTE: u8 = b'N'; +const TUPLE_KEY_BYTE: u8 = b'K'; +const TUPLE_OLD_BYTE: u8 = b'O'; +const TUPLE_DATA_NULL_BYTE: u8 = b'n'; +const TUPLE_DATA_TOAST_BYTE: u8 = b'u'; +const TUPLE_DATA_TEXT_BYTE: u8 = b't'; +const TUPLE_DATA_BINARY_BYTE: u8 = b'b'; + +const REPLICA_IDENTITY_DEFAULT_BYTE: i8 = 0x64; +const REPLICA_IDENTITY_NOTHING_BYTE: i8 = 0x6E; +const REPLICA_IDENTITY_FULL_BYTE: i8 = 0x66; +const REPLICA_IDENTITY_INDEX_BYTE: i8 = 0x69; + +#[derive(Debug)] +pub enum ReplicaIdentity { + Default, + Nothing, + Full, + Index, +} + +#[derive(Debug)] +pub struct Column { + pub flags: i8, + pub name: String, + pub type_o_id: Option, + pub type_modifier: i32, +} + +impl Column { + pub fn new(flags: i8, name: String, type_o_id: Option, type_modifier: i32) -> Self { + Self { flags, name, type_o_id, type_modifier } + } +} + +pub type Columns = Vec; + +#[derive(Debug)] +pub struct RelationBody { + pub transaction_id: Option, + pub o_id: Oid, + pub namespace: String, + pub name: String, + pub replica_identity: ReplicaIdentity, + pub columns: Columns, +} + +impl RelationBody { + pub fn new( + transaction_id: Option, + o_id: Oid, + namespace: String, + name: String, + replica_identity: ReplicaIdentity, + columns: Columns, + ) -> Self { + Self { transaction_id, o_id, namespace, name, replica_identity, columns } 
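Tying the converter and the relation cache together: once a Relation message has been registered via add_relation, body_to_json above turns the text-format tuple of an incoming change into the JSON object used as the row payload. An illustrative trace (hypothetical OID and columns):

// Registered relation:  o_id 16385, columns [("id", INT4), ("email", TEXT)]
// Incoming tuple data:  [Text("1"), Text("a@example.com")]
// body_to_json output:  {"id": 1, "email": "a@example.com"}
// Null and unchanged-TOAST columns map to JSON null; binary tuple data is
// rejected with BinaryFormatNotSupported.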
+ } +} + +#[derive(Debug)] +pub struct InsertBody { + pub transaction_id: Option, + pub o_id: Oid, + pub tuple: Vec, +} + +impl InsertBody { + pub fn new(transaction_id: Option, o_id: Oid, tuple: Vec) -> Self { + Self { transaction_id, o_id, tuple } + } +} + +#[derive(Debug)] +pub struct UpdateBody { + transaction_id: Option, + pub o_id: Oid, + pub old_tuple: Option>, + pub key_tuple: Option>, + pub new_tuple: Vec, +} + +impl UpdateBody { + pub fn new( + transaction_id: Option, + o_id: Oid, + old_tuple: Option>, + key_tuple: Option>, + new_tuple: Vec, + ) -> Self { + Self { transaction_id, o_id, old_tuple, key_tuple, new_tuple } + } +} + +#[derive(Debug)] +pub struct DeleteBody { + transaction_id: Option, + pub o_id: Oid, + pub old_tuple: Option>, + pub key_tuple: Option>, +} + +impl DeleteBody { + pub fn new( + transaction_id: Option, + o_id: Oid, + old_tuple: Option>, + key_tuple: Option>, + ) -> Self { + Self { transaction_id, o_id, old_tuple, key_tuple } + } +} + +#[derive(Debug)] +pub enum TupleData { + Null, + UnchangedToast, + Text(Bytes), + Binary(Bytes), +} + +impl TupleData { + fn parse(buf: &mut Buffer) -> Result, ConversionError> { + let number_of_columns = buf.read_i16::()?; + let mut tuples = Vec::with_capacity(number_of_columns as usize); + for _ in 0..number_of_columns { + let byte = buf.read_u8()?; + let tuple_data = match byte { + TUPLE_DATA_NULL_BYTE => TupleData::Null, + TUPLE_DATA_TOAST_BYTE => TupleData::UnchangedToast, + TUPLE_DATA_TEXT_BYTE => { + let len = buf.read_i32::()?; + let mut data = vec![0; len as usize]; + buf.read_exact(&mut data)?; + TupleData::Text(data.into()) + } + TUPLE_DATA_BINARY_BYTE => { + let len = buf.read_i32::()?; + let mut data = vec![0; len as usize]; + buf.read_exact(&mut data)?; + TupleData::Binary(data.into()) + } + byte => { + return Err(ConversionError::Io(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unknown replication message byte `{}`", byte), + ))); + } + }; + + tuples.push(tuple_data); + } + + Ok(tuples) + } +} + +#[derive(Debug)] +pub enum TransactionBody { + Insert(InsertBody), + Update(UpdateBody), + Delete(DeleteBody), +} + +#[non_exhaustive] +#[derive(Debug)] +pub enum LogicalReplicationMessage { + Begin, + Commit, + Relation(RelationBody), + Type, + Insert(InsertBody), + Update(UpdateBody), + Delete(DeleteBody), +} + +#[derive(Debug)] +pub struct XLogDataBody { + pub wal_start: u64, + pub wal_end: u64, + pub timestamp: i64, + pub data: Bytes, +} + +#[derive(Error, Debug)] +pub enum ConversionError { + #[error("Error: {0}")] + Io(#[from] io::Error), + #[error("Utf8Error conversion: {0}")] + Utf8(#[from] Utf8Error), +} + +struct Buffer { + bytes: Bytes, + idx: usize, +} + +impl Buffer { + pub fn new(bytes: Bytes, idx: usize) -> Buffer { + Buffer { bytes, idx } + } + + fn slice(&self) -> &[u8] { + &self.bytes[self.idx..] 
+ } + + fn read_cstr(&mut self) -> Result { + match self.slice().iter().position(|&x| x == 0) { + Some(pos) => { + let start = self.idx; + let end = start + pos; + let cstr = str::from_utf8(&self.bytes[start..end])?.to_owned(); + self.idx = end + 1; + Ok(cstr) + } + None => Err(ConversionError::Io(io::Error::new( + io::ErrorKind::UnexpectedEof, + "unexpected EOF", + ))), + } + } +} + +impl Read for Buffer { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let len = { + let slice = self.slice(); + let len = cmp::min(slice.len(), buf.len()); + buf[..len].copy_from_slice(&slice[..len]); + len + }; + self.idx += len; + Ok(len) + } +} + +impl XLogDataBody { + pub fn new(wal_start: u64, wal_end: u64, timestamp: i64, data: Bytes) -> XLogDataBody { + XLogDataBody { wal_start, wal_end, timestamp, data } + } + + pub fn parse( + self, + logical_replication_settings: &LogicalReplicationSettings, + ) -> Result { + let mut buf = Buffer::new(self.data.clone(), 0); + let byte = buf.read_u8()?; + + let logical_replication_message = match byte { + BEGIN_BYTE => { + buf.read_i64::()?; + buf.read_i64::()?; + buf.read_i32::()?; + + LogicalReplicationMessage::Begin + } + COMMIT_BYTE => { + buf.read_i8()?; + buf.read_u64::()?; + buf.read_u64::()?; + buf.read_i64::()?; + LogicalReplicationMessage::Commit + } + RELATION_BYTE => { + let transaction_id = match logical_replication_settings.streaming { + true => Some(buf.read_i32::()?), + false => None, + }; + + let o_id = buf.read_u32::()?; + let namespace = buf.read_cstr()?; + let name = buf.read_cstr()?; + let replica_identity = match buf.read_i8()? { + REPLICA_IDENTITY_DEFAULT_BYTE => ReplicaIdentity::Default, + REPLICA_IDENTITY_NOTHING_BYTE => ReplicaIdentity::Nothing, + REPLICA_IDENTITY_FULL_BYTE => ReplicaIdentity::Full, + REPLICA_IDENTITY_INDEX_BYTE => ReplicaIdentity::Index, + byte => { + return Err(ConversionError::Io(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unknown replica identity byte `{}`", byte), + ))); + } + }; + + let num_of_column = buf.read_i16::()?; + + let mut columns = Vec::with_capacity(num_of_column as usize); + for _ in 0..num_of_column { + let flags = buf.read_i8()?; + let name = buf.read_cstr()?; + let o_id = buf.read_u32::()?; + let type_modifier = buf.read_i32::()?; + let type_o_id = Type::from_oid(o_id); + let column = Column::new(flags, name, type_o_id, type_modifier); + + columns.push(column); + } + + LogicalReplicationMessage::Relation(RelationBody::new( + transaction_id, + o_id, + namespace, + name, + replica_identity, + columns, + )) + } + TYPE_BYTE => { + buf.read_u32::()?; + buf.read_cstr()?; + buf.read_cstr()?; + + LogicalReplicationMessage::Type + } + INSERT_BYTE => { + let transaction_id = match logical_replication_settings.streaming { + true => Some(buf.read_i32::()?), + false => None, + }; + let o_id = buf.read_u32::()?; + let byte = buf.read_u8()?; + + let tuple = match byte { + TUPLE_NEW_BYTE => TupleData::parse(&mut buf)?, + byte => { + return Err(ConversionError::Io(io::Error::new( + io::ErrorKind::InvalidInput, + format!("unexpected tuple byte `{}`", byte), + ))); + } + }; + + LogicalReplicationMessage::Insert(InsertBody::new(transaction_id, o_id, tuple)) + } + UPDATE_BYTE => { + let transaction_id = match logical_replication_settings.streaming { + true => Some(buf.read_i32::()?), + false => None, + }; + let o_id = buf.read_u32::()?; + let byte = buf.read_u8()?; + let mut key_tuple = None; + let mut old_tuple = None; + + let new_tuple = match byte { + TUPLE_NEW_BYTE => TupleData::parse(&mut buf)?, + 
+                    TUPLE_OLD_BYTE | TUPLE_KEY_BYTE => {
+                        if byte == TUPLE_OLD_BYTE {
+                            old_tuple = Some(TupleData::parse(&mut buf)?);
+                        } else {
+                            key_tuple = Some(TupleData::parse(&mut buf)?);
+                        }
+                        match buf.read_u8()? {
+                            TUPLE_NEW_BYTE => TupleData::parse(&mut buf)?,
+                            byte => {
+                                return Err(ConversionError::Io(io::Error::new(
+                                    io::ErrorKind::InvalidInput,
+                                    format!("unexpected tuple byte `{}`", byte),
+                                )));
+                            }
+                        }
+                    }
+                    byte => {
+                        return Err(ConversionError::Io(io::Error::new(
+                            io::ErrorKind::InvalidInput,
+                            format!("unknown tuple byte `{}`", byte),
+                        )));
+                    }
+                };
+
+                LogicalReplicationMessage::Update(UpdateBody::new(
+                    transaction_id,
+                    o_id,
+                    old_tuple,
+                    key_tuple,
+                    new_tuple,
+                ))
+            }
+            DELETE_BYTE => {
+                let transaction_id = match logical_replication_settings.streaming {
+                    true => Some(buf.read_i32::<BigEndian>()?),
+                    false => None,
+                };
+                let o_id = buf.read_u32::<BigEndian>()?;
+                let tag = buf.read_u8()?;
+
+                let mut key_tuple = None;
+                let mut old_tuple = None;
+
+                match tag {
+                    TUPLE_OLD_BYTE => old_tuple = Some(TupleData::parse(&mut buf)?),
+                    TUPLE_KEY_BYTE => key_tuple = Some(TupleData::parse(&mut buf)?),
+                    tag => {
+                        return Err(ConversionError::Io(io::Error::new(
+                            io::ErrorKind::InvalidInput,
+                            format!("unknown tuple tag `{}`", tag),
+                        )));
+                    }
+                }
+
+                LogicalReplicationMessage::Delete(DeleteBody::new(
+                    transaction_id,
+                    o_id,
+                    old_tuple,
+                    key_tuple,
+                ))
+            }
+            byte => {
+                return Err(ConversionError::Io(io::Error::new(
+                    io::ErrorKind::InvalidInput,
+                    format!("unknown replication message tag `{}`", byte),
+                )));
+            }
+        };
+
+        Ok(logical_replication_message)
+    }
+}
+
+#[non_exhaustive]
+#[derive(Debug)]
+pub enum ReplicationMessage {
+    XLogData(XLogDataBody),
+    PrimaryKeepAlive(PrimaryKeepAliveBody),
+}
+
+impl ReplicationMessage {
+    pub fn parse(buf: Bytes) -> io::Result<Self> {
+        let (byte, mut message) = buf.split_first().unwrap();
+
+        let replication_message = match *byte {
+            X_LOG_DATA_BYTE => {
+                let len = buf.len();
+                let wal_start = message.read_u64::<BigEndian>()?;
+                let wal_end = message.read_u64::<BigEndian>()?;
+                let timestamp = message.read_i64::<BigEndian>()?;
+                let len = len - message.len();
+                let data = buf.slice(len..);
+                ReplicationMessage::XLogData(XLogDataBody::new(wal_start, wal_end, timestamp, data))
+            }
+            PRIMARY_KEEPALIVE_BYTE => {
+                let wal_end = message.read_u64::<BigEndian>()?;
+                let timestamp = message.read_i64::<BigEndian>()?;
+                let reply = message.read_u8()?;
+                ReplicationMessage::PrimaryKeepAlive(PrimaryKeepAliveBody::new(
+                    wal_end,
+                    timestamp,
+                    reply == 1,
+                ))
+            }
+            byte => {
+                return Err(io::Error::new(
+                    io::ErrorKind::InvalidInput,
+                    format!("unknown replication message byte `{}`", byte),
+                ));
+            }
+        };
+
+        Ok(replication_message)
+    }
+}
diff --git a/backend/windmill-api/src/postgres_triggers/trigger.rs b/backend/windmill-api/src/postgres_triggers/trigger.rs
new file mode 100644
index 0000000000000..0ed9891cbc8be
--- /dev/null
+++ b/backend/windmill-api/src/postgres_triggers/trigger.rs
@@ -0,0 +1,523 @@
+use std::{collections::HashMap, pin::Pin};
+
+use crate::{
+    db::DB,
+    postgres_triggers::{
+        get_database_resource,
+        relation::RelationConverter,
+        replication_message::{
+            LogicalReplicationMessage::{Begin, Commit, Delete, Insert, Relation, Type, Update},
+            ReplicationMessage,
+        },
+        run_job,
+    },
+    users::fetch_api_authed,
+};
+use bytes::{BufMut, Bytes, BytesMut};
+use chrono::TimeZone;
+use futures::{pin_mut, SinkExt, StreamExt};
+use pg_escape::{quote_identifier, quote_literal};
+use rand::seq::SliceRandom;
+use rust_postgres::{Client, Config, CopyBothDuplex, NoTls, SimpleQueryMessage};
+use windmill_common::{
+    db::UserDB, utils::report_critical_error, worker::to_raw_value, INSTANCE_NAME,
+};
+
+use super::{
+    handler::{Database, PostgresTrigger},
+    replication_message::PrimaryKeepAliveBody,
+};
+
+pub struct LogicalReplicationSettings {
+    pub streaming: bool,
+}
+
+impl LogicalReplicationSettings {
+    pub fn new(streaming: bool) -> Self {
+        Self { streaming }
+    }
+}
+
+#[allow(unused)]
+trait RowExist {
+    fn row_exist(&self) -> bool;
+}
+
+impl RowExist for Vec<SimpleQueryMessage> {
+    fn row_exist(&self) -> bool {
+        self.iter()
+            .find_map(|element| {
+                if let SimpleQueryMessage::CommandComplete(value) = element {
+                    Some(*value)
+                } else {
+                    None
+                }
+            })
+            .is_some_and(|value| value > 0)
+    }
+}
+
+#[derive(thiserror::Error, Debug)]
+enum Error {
+    #[error("Error from database: {0}")]
+    Postgres(rust_postgres::Error),
+    #[error("Error : {0}")]
+    Common(windmill_common::error::Error),
+}
+
+pub struct PostgresSimpleClient(Client);
+
+impl PostgresSimpleClient {
+    async fn new(database: &Database) -> Result<Self, Error> {
+        let mut config = Config::new();
+        config
+            .dbname(&database.dbname)
+            .host(&database.host)
+            .port(database.port)
+            .user(&database.user)
+            .replication_mode(rust_postgres::config::ReplicationMode::Logical);
+
+        if !database.password.is_empty() {
+            config.password(&database.password);
+        }
+
+        let (client, connection) = config.connect(NoTls).await.map_err(Error::Postgres)?;
+        tokio::spawn(async move {
+            if let Err(e) = connection.await {
+                tracing::debug!("{:#?}", e);
+            };
+            tracing::info!("Successfully Connected into database");
+        });
+
+        Ok(PostgresSimpleClient(client))
+    }
+
+    async fn get_logical_replication_stream(
+        &self,
+        publication_name: &str,
+        logical_replication_slot_name: &str,
+    ) -> Result<(CopyBothDuplex<Bytes>, LogicalReplicationSettings), Error> {
+        let options = format!(
+            r#"("proto_version" '2', "publication_names" {})"#,
+            quote_literal(publication_name),
+        );
+
+        let query = format!(
+            r#"START_REPLICATION SLOT {} LOGICAL 0/0 {}"#,
+            quote_identifier(logical_replication_slot_name),
+            options
+        );
+
+        Ok((
+            self.0
+                .copy_both_simple::<Bytes>(query.as_str())
+                .await
+                .map_err(Error::Postgres)?,
+            LogicalReplicationSettings::new(false),
+        ))
+    }
+
+    async fn send_status_update(
+        primary_keep_alive: PrimaryKeepAliveBody,
+        copy_both_stream: &mut Pin<&mut CopyBothDuplex<Bytes>>,
+    ) {
+        let mut buf = BytesMut::new();
+        let ts = chrono::Utc.with_ymd_and_hms(2000, 1, 1, 0, 0, 0).unwrap();
+        let ts = chrono::Utc::now()
+            .signed_duration_since(ts)
+            .num_microseconds()
+            .unwrap_or(0);
+
+        buf.put_u8(b'r');
+        buf.put_u64(primary_keep_alive.wal_end);
+        buf.put_u64(primary_keep_alive.wal_end);
+        buf.put_u64(primary_keep_alive.wal_end);
+        buf.put_i64(ts);
+        buf.put_u8(0);
+        copy_both_stream.send(buf.freeze()).await.unwrap();
+        tracing::debug!("Send update status message");
+    }
+}
+
+async fn update_ping(
+    db: &DB,
+    postgres_trigger: &PostgresTrigger,
+    error: Option<&str>,
+) -> Option<()> {
+    let updated = sqlx::query_scalar!(
+        r#"
+        UPDATE
+            postgres_trigger
+        SET
+            last_server_ping = now(),
+            error = $1
+        WHERE
+            workspace_id = $2
+            AND path = $3
+            AND server_id = $4
+            AND enabled IS TRUE
+        RETURNING 1
+        "#,
+        error,
+        &postgres_trigger.workspace_id,
+        &postgres_trigger.path,
+        *INSTANCE_NAME
+    )
+    .fetch_optional(db)
+    .await;
+
+    match updated {
+        Ok(updated) => {
+            if updated.flatten().is_none() {
+                // allow faster restart of database trigger
+                sqlx::query!(
+                    r#"
+                    UPDATE
+                        postgres_trigger
+                    SET
+                        last_server_ping = NULL
+                    WHERE
+                        workspace_id = $1
+                        AND path = $2
+                        AND server_id IS NULL"#,
&postgres_trigger.workspace_id, + &postgres_trigger.path, + ) + .execute(db) + .await + .ok(); + tracing::info!( + "Postgres trigger {} changed, disabled, or deleted, stopping...", + postgres_trigger.path + ); + return None; + } + } + Err(err) => { + tracing::warn!( + "Error updating ping of postgres trigger {}: {:?}", + postgres_trigger.path, + err + ); + } + }; + + Some(()) +} + +async fn loop_ping(db: &DB, postgres_trigger: &PostgresTrigger, error: Option<&str>) { + loop { + if update_ping(db, postgres_trigger, error).await.is_none() { + return; + } + + tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; + } +} + +async fn disable_with_error(postgres_trigger: &PostgresTrigger, db: &DB, error: String) -> () { + match sqlx::query!( + "UPDATE postgres_trigger SET enabled = FALSE, error = $1, server_id = NULL, last_server_ping = NULL WHERE workspace_id = $2 AND path = $3", + error, + postgres_trigger.workspace_id, + postgres_trigger.path, + ) + .execute(db).await { + Ok(_) => { + report_critical_error(format!("Disabling postgres trigger {} because of error: {}", postgres_trigger.path, error), db.clone(), Some(&postgres_trigger.workspace_id), None).await; + }, + Err(disable_err) => { + report_critical_error( + format!("Could not disable postgres trigger {} with err {}, disabling because of error {}", postgres_trigger.path, disable_err, error), + db.clone(), + Some(&postgres_trigger.workspace_id), + None, + ).await; + } + } +} + +async fn listen_to_transactions( + postgres_trigger: &PostgresTrigger, + db: DB, + mut killpill_rx: tokio::sync::broadcast::Receiver<()>, +) { + let start_logical_replication_streaming = async { + let authed = fetch_api_authed( + postgres_trigger.edited_by.clone(), + postgres_trigger.email.clone(), + &postgres_trigger.workspace_id, + &db, + None, + ) + .await + .map_err(Error::Common)?; + + let database = get_database_resource( + authed, + Some(UserDB::new(db.clone())), + &db, + &postgres_trigger.postgres_resource_path, + &postgres_trigger.workspace_id, + ) + .await + .map_err(Error::Common)?; + + let client = PostgresSimpleClient::new(&database).await?; + + let (logical_replication_stream, logical_replication_settings) = client + .get_logical_replication_stream( + &postgres_trigger.publication_name, + &postgres_trigger.replication_slot_name, + ) + .await?; + + Ok::<_, Error>((logical_replication_stream, logical_replication_settings)) + }; + + tokio::select! { + biased; + _ = killpill_rx.recv() => { + return; + } + _ = loop_ping(&db, postgres_trigger, Some("Connecting...")) => { + return; + } + result = start_logical_replication_streaming => { + tokio::select! 
{ + biased; + _ = killpill_rx.recv() => { + return; + } + _ = loop_ping(&db, postgres_trigger, None) => { + return; + } + _ = { + async { + match result { + Ok((logical_replication_stream, logical_replication_settings)) => { + pin_mut!(logical_replication_stream); + let mut relations = RelationConverter::new(); + tracing::info!("Starting to listen for postgres trigger {}", postgres_trigger.path); + loop { + let message = logical_replication_stream.next().await; + + let message = match message { + Some(message) => message, + None => { + tracing::info!("Stream for postgres trigger {} is empty, leaving....", postgres_trigger.path); + return; + } + }; + + let message = match message { + Ok(message) => message, + Err(err) => { + let err = format!("Postgres trigger named {} had an error while receiving a message : {}", &postgres_trigger.path, err.to_string()); + disable_with_error(&postgres_trigger, &db, err).await; + return; + } + }; + + let logical_message = match ReplicationMessage::parse(message) { + Ok(logical_message) => logical_message, + Err(err) => { + let err = format!("Postgres trigger named: {} had an error while parsing message: {}", postgres_trigger.path, err.to_string()); + disable_with_error(&postgres_trigger, &db, err).await; + return; + } + }; + + + match logical_message { + ReplicationMessage::PrimaryKeepAlive(primary_keep_alive) => { + if primary_keep_alive.reply { + PostgresSimpleClient::send_status_update(primary_keep_alive, &mut logical_replication_stream).await; + } + } + ReplicationMessage::XLogData(x_log_data) => { + let logical_replication_message = match x_log_data.parse(&logical_replication_settings) { + Ok(logical_replication_message) => logical_replication_message, + Err(err) => { + tracing::error!("Postgres trigger named: {} had an error while trying to parse incomming stream message: {}", &postgres_trigger.path, err.to_string()); + continue; + } + }; + + let json = match logical_replication_message { + Relation(relation_body) => { + relations.add_relation(relation_body); + None + } + Begin | Type | Commit => { + None + } + Insert(insert) => { + Some((insert.o_id, relations.body_to_json((insert.o_id, insert.tuple)), "insert")) + } + Update(update) => { + Some((update.o_id, relations.body_to_json((update.o_id, update.new_tuple)), "update")) + } + Delete(delete) => { + let body = delete.old_tuple.unwrap_or(delete.key_tuple.unwrap()); + Some((delete.o_id, relations.body_to_json((delete.o_id, body)), "delete")) + } + }; + if let Some((o_id, Ok(body), transaction_type)) = json { + let relation = match relations.get_relation(o_id) { + Ok(relation) => relation, + Err(err) => { + tracing::error!("Postgres trigger named: {}, error: {}", &postgres_trigger.path, err.to_string()); + continue; + } + }; + let database_info = HashMap::from([ + ("schema_name".to_string(), to_raw_value(&relation.namespace)), + ("table_name".to_string(), to_raw_value(&relation.name)), + ("transaction_type".to_string(), to_raw_value(&transaction_type)), + ("row".to_string(), to_raw_value(&body)), + ]); + let extra = Some(HashMap::from([( + "wm_trigger".to_string(), + to_raw_value(&serde_json::json!({"kind": "postgres", })), + )])); + let _ = run_job(Some(database_info), extra, &db, postgres_trigger).await; + } + + } + } + } + } + Err(err) => { + tracing::error!("Postgres trigger error while trying to start_logical_replication_streaming: {}", err.to_string()) + } + } + } + } => { + return; + } + } + } + } +} + +async fn try_to_listen_to_database_transactions( + pg_trigger: PostgresTrigger, + db: DB, 
+ killpill_rx: tokio::sync::broadcast::Receiver<()>, +) { + let postgres_trigger = sqlx::query_scalar!( + r#" + UPDATE postgres_trigger + SET + server_id = $1, + last_server_ping = now(), + error = 'Connecting...' + WHERE + enabled IS TRUE + AND workspace_id = $2 + AND path = $3 + AND (last_server_ping IS NULL + OR last_server_ping < now() - INTERVAL '15 seconds' + ) + RETURNING true + "#, + *INSTANCE_NAME, + pg_trigger.workspace_id, + pg_trigger.path, + ) + .fetch_optional(&db) + .await; + match postgres_trigger { + Ok(has_lock) => { + if has_lock.flatten().unwrap_or(false) { + tracing::info!("Spawning new task to listen_to_database_transaction"); + tokio::spawn(async move { + listen_to_transactions(&pg_trigger, db.clone(), killpill_rx).await; + }); + } else { + tracing::info!( + "Postgres trigger {} already being listened to", + pg_trigger.path + ); + } + } + Err(err) => { + tracing::error!( + "Error acquiring lock for postgres trigger {}: {:?}", + pg_trigger.path, + err + ); + } + }; +} + +async fn listen_to_unlistened_database_events( + db: &DB, + killpill_rx: &tokio::sync::broadcast::Receiver<()>, +) { + let postgres_triggers = sqlx::query_as!( + PostgresTrigger, + r#" + SELECT + workspace_id, + path, + script_path, + replication_slot_name, + publication_name, + is_flow, + edited_by, + email, + edited_at, + server_id, + last_server_ping, + extra_perms, + error, + enabled, + postgres_resource_path + FROM + postgres_trigger + WHERE + enabled IS TRUE + AND (last_server_ping IS NULL OR + last_server_ping < now() - interval '15 seconds' + ) + "# + ) + .fetch_all(db) + .await; + + match postgres_triggers { + Ok(mut triggers) => { + triggers.shuffle(&mut rand::thread_rng()); + for trigger in triggers { + try_to_listen_to_database_transactions( + trigger, + db.clone(), + killpill_rx.resubscribe(), + ) + .await; + } + } + Err(err) => { + tracing::error!("Error fetching postgres triggers: {:?}", err); + } + }; +} + +pub async fn start_database(db: DB, mut killpill_rx: tokio::sync::broadcast::Receiver<()>) { + tokio::spawn(async move { + listen_to_unlistened_database_events(&db, &killpill_rx).await; + loop { + tokio::select! 
{ + biased; + _ = killpill_rx.recv() => { + return; + } + _ = tokio::time::sleep(tokio::time::Duration::from_secs(15)) => { + listen_to_unlistened_database_events(&db, &killpill_rx).await + } + } + } + }); +} diff --git a/backend/windmill-api/src/slack_approvals.rs b/backend/windmill-api/src/slack_approvals.rs index 1f48bd07dc4e9..9b02f4c73af1a 100644 --- a/backend/windmill-api/src/slack_approvals.rs +++ b/backend/windmill-api/src/slack_approvals.rs @@ -448,10 +448,16 @@ async fn transform_schemas( let is_required = required.unwrap().contains(key); let default_value = default_args_json.and_then(|json| json.get(key).cloned()); - let dynamic_enums_value = dynamic_enums_json.and_then(|json| json.get(key).cloned()); - - let input_block = - create_input_block(key, schema, is_required, default_value, dynamic_enums_value); + let dynamic_enums_value = + dynamic_enums_json.and_then(|json| json.get(key).cloned()); + + let input_block = create_input_block( + key, + schema, + is_required, + default_value, + dynamic_enums_value, + ); match input_block { serde_json::Value::Array(arr) => blocks.extend(arr), _ => blocks.push(input_block), @@ -536,7 +542,7 @@ fn create_input_block( // Handle date-time format if let FieldType::String = schema.r#type { - if schema.format.as_deref() == Some("date-time") { + if schema.format.as_deref() == Some("date-time") { tracing::debug!("Date-time type"); let now = chrono::Local::now(); let current_date = now.format("%Y-%m-%d").to_string(); diff --git a/backend/windmill-api/src/triggers.rs b/backend/windmill-api/src/triggers.rs index 0f3edbc87c3c5..7101bdd2a9eed 100644 --- a/backend/windmill-api/src/triggers.rs +++ b/backend/windmill-api/src/triggers.rs @@ -20,6 +20,7 @@ pub struct TriggersCount { websocket_count: i64, kafka_count: i64, nats_count: i64, + postgres_count: i64, } pub(crate) async fn get_triggers_count_internal( db: &DB, @@ -86,6 +87,16 @@ pub(crate) async fn get_triggers_count_internal( .await? .unwrap_or(0); + let postgres_count = sqlx::query_scalar!( + "SELECT COUNT(*) FROM postgres_trigger WHERE script_path = $1 AND is_flow = $2 AND workspace_id = $3", + path, + is_flow, + w_id + ) + .fetch_one(db) + .await? 
+ .unwrap_or(0); + let webhook_count = (if is_flow { sqlx::query_scalar!( "SELECT COUNT(*) FROM token WHERE label LIKE 'webhook-%' AND workspace_id = $1 AND scopes @> ARRAY['run:flow/' || $2]::text[]", @@ -129,6 +140,7 @@ pub(crate) async fn get_triggers_count_internal( websocket_count, kafka_count, nats_count, + postgres_count, })) } diff --git a/backend/windmill-api/src/variables.rs b/backend/windmill-api/src/variables.rs index aaf3b1dd4949e..48800621b6efb 100644 --- a/backend/windmill-api/src/variables.rs +++ b/backend/windmill-api/src/variables.rs @@ -676,3 +676,28 @@ pub async fn get_value_internal<'c>( Ok(r) } + +pub async fn get_variable_or_self(path: String, db: &DB, w_id: &str) -> Result { + if !path.starts_with("$var:") { + return Ok(path); + } + let path = path.strip_prefix("$var:").unwrap().to_string(); + + let record = sqlx::query!( + "SELECT value, is_secret + FROM variable + WHERE path = $1 AND workspace_id = $2", + &path, + &w_id + ) + .fetch_one(db) + .await?; + + let mut value = record.value; + if record.is_secret { + let mc = build_crypt(db, w_id).await?; + value = decrypt(&mc, value)?; + } + + Ok(value) +} diff --git a/backend/windmill-api/src/websocket_triggers.rs b/backend/windmill-api/src/websocket_triggers.rs index 2f00a1f5a4291..d6260691c7e3d 100644 --- a/backend/windmill-api/src/websocket_triggers.rs +++ b/backend/windmill-api/src/websocket_triggers.rs @@ -380,7 +380,7 @@ async fn exists_websocket_trigger( async fn listen_to_unlistened_websockets( db: &DB, killpill_rx: &tokio::sync::broadcast::Receiver<()>, -) -> () { +) { match sqlx::query_as::<_, WebsocketTrigger>( r#"SELECT * FROM websocket_trigger diff --git a/backend/windmill-api/src/workspaces.rs b/backend/windmill-api/src/workspaces.rs index 144d58f3ade50..a4adc61977990 100644 --- a/backend/windmill-api/src/workspaces.rs +++ b/backend/windmill-api/src/workspaces.rs @@ -35,7 +35,7 @@ use windmill_audit::ActionKind; use windmill_common::db::UserDB; use windmill_common::s3_helpers::LargeFileStorage; use windmill_common::users::username_to_permissioned_as; -use windmill_common::variables::build_crypt; +use windmill_common::variables::{build_crypt, decrypt, encrypt}; use windmill_common::worker::to_raw_value; #[cfg(feature = "enterprise")] use windmill_common::workspaces::WorkspaceDeploymentUISettings; @@ -52,7 +52,6 @@ use windmill_git_sync::handle_deployment_metadata; #[cfg(feature = "enterprise")] use windmill_common::utils::require_admin_or_devops; -use windmill_common::variables::{decrypt, encrypt}; use hyper::StatusCode; use serde::{Deserialize, Serialize}; use sqlx::{FromRow, Postgres, Transaction}; @@ -1304,6 +1303,7 @@ struct UsedTriggers { pub http_routes_used: bool, pub kafka_used: bool, pub nats_used: bool, + pub postgres_used: bool, } async fn get_used_triggers( @@ -1314,12 +1314,17 @@ async fn get_used_triggers( let mut tx = user_db.begin(&authed).await?; let websocket_used = sqlx::query_as!( UsedTriggers, - r#"SELECT - EXISTS(SELECT 1 FROM websocket_trigger WHERE workspace_id = $1) as "websocket_used!", - EXISTS(SELECT 1 FROM http_trigger WHERE workspace_id = $1) as "http_routes_used!", + r#" + SELECT + + EXISTS(SELECT 1 FROM websocket_trigger WHERE workspace_id = $1) AS "websocket_used!", + + EXISTS(SELECT 1 FROM http_trigger WHERE workspace_id = $1) AS "http_routes_used!", EXISTS(SELECT 1 FROM kafka_trigger WHERE workspace_id = $1) as "kafka_used!", - EXISTS(SELECT 1 FROM nats_trigger WHERE workspace_id = $1) as "nats_used!""#, - w_id, + EXISTS(SELECT 1 FROM nats_trigger WHERE workspace_id = $1) 
as "nats_used!", + EXISTS(SELECT 1 FROM postgres_trigger WHERE workspace_id = $1) AS "postgres_used!" + "#, + w_id ) .fetch_one(&mut *tx) .await?; diff --git a/backend/windmill-api/src/workspaces_export.rs b/backend/windmill-api/src/workspaces_export.rs index 70fea1ea5a4d8..0a417ff916fd8 100644 --- a/backend/windmill-api/src/workspaces_export.rs +++ b/backend/windmill-api/src/workspaces_export.rs @@ -25,18 +25,16 @@ use axum::{ use http::HeaderName; use itertools::Itertools; -use windmill_common::db::UserDB; -use windmill_common::schedule::Schedule; -use windmill_common::variables::build_crypt; - +use windmill_common::variables::decrypt; use windmill_common::{ + db::UserDB, error::{to_anyhow, Error, Result}, flows::Flow, + schedule::Schedule, scripts::{Schema, Script, ScriptLang}, - variables::ExportableListableVariable, + variables::{build_crypt, ExportableListableVariable}, }; -use windmill_common::variables::decrypt; use hyper::header; use serde::{Deserialize, Serialize}; use serde_json::Value; diff --git a/backend/windmill-common/Cargo.toml b/backend/windmill-common/Cargo.toml index 2c689cecfbbbd..8f1bc299ea9d9 100644 --- a/backend/windmill-common/Cargo.toml +++ b/backend/windmill-common/Cargo.toml @@ -61,6 +61,7 @@ async-stream.workspace = true const_format.workspace = true crc.workspace = true windmill-macros.workspace = true + semver.workspace = true croner = "2.0.6" quick_cache.workspace = true diff --git a/backend/windmill-common/src/lib.rs b/backend/windmill-common/src/lib.rs index 4c33fae7b0a4f..e47c4fc87fe27 100644 --- a/backend/windmill-common/src/lib.rs +++ b/backend/windmill-common/src/lib.rs @@ -248,18 +248,16 @@ pub async fn connect_db( Err(_) => { if server_mode { DEFAULT_MAX_CONNECTIONS_SERVER + } else if indexer_mode { + DEFAULT_MAX_CONNECTIONS_INDEXER } else { - if indexer_mode { - DEFAULT_MAX_CONNECTIONS_INDEXER - } else { - DEFAULT_MAX_CONNECTIONS_WORKER - + std::env::var("NUM_WORKERS") - .ok() - .map(|x| x.parse().ok()) - .flatten() - .unwrap_or(1) - - 1 - } + DEFAULT_MAX_CONNECTIONS_WORKER + + std::env::var("NUM_WORKERS") + .ok() + .map(|x| x.parse().ok()) + .flatten() + .unwrap_or(1) + - 1 } } }; diff --git a/backend/windmill-common/src/variables.rs b/backend/windmill-common/src/variables.rs index 7cb96d1927270..7f91758c90829 100644 --- a/backend/windmill-common/src/variables.rs +++ b/backend/windmill-common/src/variables.rs @@ -6,13 +6,13 @@ * LICENSE-AGPL for a copy of the license. */ +use crate::error::Result; +use crate::{worker::WORKER_GROUP, BASE_URL, DB}; use chrono::{SecondsFormat, Utc}; use magic_crypt::{MagicCrypt256, MagicCryptError, MagicCryptTrait}; use serde::{Deserialize, Serialize}; use crate::error; -use crate::{worker::WORKER_GROUP, BASE_URL, DB}; - lazy_static::lazy_static! { pub static ref SECRET_SALT: Option = std::env::var("SECRET_SALT").ok(); } @@ -133,7 +133,7 @@ pub async fn get_secret_value_as_admin( let r = if variable.is_secret { let value = variable.value; if !value.is_empty() { - let mc = build_crypt(&db, &w_id).await?; + let mc = build_crypt(db, w_id).await?; decrypt_value_with_mc(value, mc).await? 
} else { "".to_string() @@ -145,17 +145,14 @@ pub async fn get_secret_value_as_admin( Ok(r) } -pub async fn decrypt_value_with_mc( - value: String, - mc: MagicCrypt256, -) -> Result { - Ok(mc.decrypt_base64_to_string(value).map_err(|e| match e { +pub async fn decrypt_value_with_mc(value: String, mc: MagicCrypt256) -> Result { + mc.decrypt_base64_to_string(value).map_err(|e| match e { MagicCryptError::DecryptError(_) => crate::error::Error::InternalErr( "Could not decrypt value. The value may have been encrypted with a different key." .to_string(), ), _ => crate::error::Error::InternalErr(e.to_string()), - })?) + }) } pub fn encrypt(mc: &MagicCrypt256, value: &str) -> String { diff --git a/backend/windmill-indexer/src/indexer_ee.rs b/backend/windmill-indexer/src/indexer_ee.rs index e69de29bb2d1d..8b137891791fe 100644 --- a/backend/windmill-indexer/src/indexer_ee.rs +++ b/backend/windmill-indexer/src/indexer_ee.rs @@ -0,0 +1 @@ + diff --git a/backend/windmill-queue/Cargo.toml b/backend/windmill-queue/Cargo.toml index a89a9d56fac91..a90fa6ccf74b5 100644 --- a/backend/windmill-queue/Cargo.toml +++ b/backend/windmill-queue/Cargo.toml @@ -39,7 +39,6 @@ futures-core.workspace = true futures.workspace = true itertools.workspace = true async-recursion.workspace = true -bigdecimal.workspace = true axum.workspace = true serde_urlencoded.workspace = true regex.workspace = true diff --git a/backend/windmill-worker/src/ansible_executor.rs b/backend/windmill-worker/src/ansible_executor.rs index e21728943e500..fee2eaa119d6c 100644 --- a/backend/windmill-worker/src/ansible_executor.rs +++ b/backend/windmill-worker/src/ansible_executor.rs @@ -1,10 +1,5 @@ #[cfg(unix)] -use std::{ - collections::HashMap, - os::unix::fs::PermissionsExt, - path::PathBuf, - process::Stdio, -}; +use std::{collections::HashMap, os::unix::fs::PermissionsExt, path::PathBuf, process::Stdio}; #[cfg(windows)] use std::{ @@ -29,7 +24,8 @@ use windmill_queue::{append_logs, CanceledBy}; use crate::{ bash_executor::BIN_BASH, common::{ - check_executor_binary_exists, get_reserved_variables, read_and_check_result, start_child_process, transform_json, OccupancyMetrics + check_executor_binary_exists, get_reserved_variables, read_and_check_result, + start_child_process, transform_json, OccupancyMetrics, }, handle_child::handle_child, python_executor::{create_dependencies_dir, handle_python_reqs, uv_pip_compile, PyVersion}, @@ -201,7 +197,11 @@ pub async fn handle_ansible_job( envs: HashMap, occupancy_metrics: &mut OccupancyMetrics, ) -> windmill_common::error::Result> { - check_executor_binary_exists("ansible-playbook", ANSIBLE_PLAYBOOK_PATH.as_str(), "ansible")?; + check_executor_binary_exists( + "ansible-playbook", + ANSIBLE_PLAYBOOK_PATH.as_str(), + "ansible", + )?; let (logs, reqs, playbook) = windmill_parser_yaml::parse_ansible_reqs(inner_content)?; append_logs(&job.id, &job.workspace_id, logs, db).await; diff --git a/backend/windmill-worker/src/lib.rs b/backend/windmill-worker/src/lib.rs index d887f14d81a1f..cc34dd72ab72d 100644 --- a/backend/windmill-worker/src/lib.rs +++ b/backend/windmill-worker/src/lib.rs @@ -25,6 +25,8 @@ mod job_logger_ee; mod js_eval; #[cfg(feature = "mysql")] mod mysql_executor; +#[cfg(feature = "oracledb")] +mod oracledb_executor; mod pg_executor; #[cfg(feature = "php")] mod php_executor; @@ -36,8 +38,6 @@ mod rust_executor; mod worker; mod worker_flow; mod worker_lockfiles; -#[cfg(feature = "oracledb")] -mod oracledb_executor; pub use worker::*; pub use result_processor::handle_job_error; diff --git 
a/backend/windmill-worker/src/oracledb_executor.rs b/backend/windmill-worker/src/oracledb_executor.rs index fc5686568e1de..f6e64a60a1e6a 100644 --- a/backend/windmill-worker/src/oracledb_executor.rs +++ b/backend/windmill-worker/src/oracledb_executor.rs @@ -20,7 +20,7 @@ use windmill_parser_sql::{ use windmill_queue::CanceledBy; use crate::{ - common::{check_executor_binary_exists, build_args_map, OccupancyMetrics}, + common::{build_args_map, check_executor_binary_exists, OccupancyMetrics}, handle_child::run_future_with_polling_update_job_poller, AuthedClientBackgroundTask, }; @@ -302,7 +302,11 @@ pub async fn do_oracledb( column_order: &mut Option>, occupancy_metrics: &mut OccupancyMetrics, ) -> windmill_common::error::Result> { - check_executor_binary_exists("the Oracle client lib", ORACLE_LIB_DIR.as_str(), "Oracle Database")?; + check_executor_binary_exists( + "the Oracle client lib", + ORACLE_LIB_DIR.as_str(), + "Oracle Database", + )?; let args = build_args_map(job, client, db).await?.map(Json); let job_args = if args.is_some() { diff --git a/backend/windmill-worker/src/php_executor.rs b/backend/windmill-worker/src/php_executor.rs index d105881078891..7eb0d9542900c 100644 --- a/backend/windmill-worker/src/php_executor.rs +++ b/backend/windmill-worker/src/php_executor.rs @@ -15,7 +15,8 @@ use windmill_queue::{append_logs, CanceledBy}; use crate::{ common::{ - check_executor_binary_exists, create_args_and_out_file, get_main_override, get_reserved_variables, read_result, start_child_process, OccupancyMetrics + check_executor_binary_exists, create_args_and_out_file, get_main_override, + get_reserved_variables, read_result, start_child_process, OccupancyMetrics, }, handle_child::handle_child, AuthedClientBackgroundTask, COMPOSER_CACHE_DIR, COMPOSER_PATH, DISABLE_NSJAIL, DISABLE_NUSER, diff --git a/backend/windmill-worker/src/worker.rs b/backend/windmill-worker/src/worker.rs index 9499dd9eec739..0d657fbf16a4e 100644 --- a/backend/windmill-worker/src/worker.rs +++ b/backend/windmill-worker/src/worker.rs @@ -90,12 +90,26 @@ use tokio::{ use rand::Rng; use crate::{ - bash_executor::{handle_bash_job, handle_powershell_job}, bun_executor::handle_bun_job, common::{ + bash_executor::{handle_bash_job, handle_powershell_job}, + bun_executor::handle_bun_job, + common::{ build_args_map, cached_result_path, get_cached_resource_value_if_valid, get_reserved_variables, update_worker_ping_for_failed_init_script, OccupancyMetrics, - }, csharp_executor::handle_csharp_job, deno_executor::handle_deno_job, go_executor::handle_go_job, graphql_executor::do_graphql, handle_child::SLOW_LOGS, handle_job_error, job_logger::NO_LOGS_AT_ALL, js_eval::{eval_fetch_timeout, transpile_ts}, pg_executor::do_postgresql, result_processor::{process_result, start_background_processor}, worker_flow::{handle_flow, update_flow_status_in_progress}, worker_lockfiles::{ + }, + csharp_executor::handle_csharp_job, + deno_executor::handle_deno_job, + go_executor::handle_go_job, + graphql_executor::do_graphql, + handle_child::SLOW_LOGS, + handle_job_error, + job_logger::NO_LOGS_AT_ALL, + js_eval::{eval_fetch_timeout, transpile_ts}, + pg_executor::do_postgresql, + result_processor::{process_result, start_background_processor}, + worker_flow::{handle_flow, update_flow_status_in_progress}, + worker_lockfiles::{ handle_app_dependency_job, handle_dependency_job, handle_flow_dependency_job, - } + }, }; #[cfg(feature = "rust")] @@ -331,7 +345,6 @@ const DOTNET_DEFAULT_PATH: &str = "C:\\Program Files\\dotnet\\dotnet.exe"; #[cfg(unix)] const 
DOTNET_DEFAULT_PATH: &str = "/usr/bin/dotnet"; - lazy_static::lazy_static! { pub static ref JOB_TOKEN: Option = std::env::var("JOB_TOKEN").ok(); diff --git a/frontend/.prettierrc b/frontend/.prettierrc.json similarity index 100% rename from frontend/.prettierrc rename to frontend/.prettierrc.json diff --git a/frontend/src/lib/components/ApiConnectForm.svelte b/frontend/src/lib/components/ApiConnectForm.svelte index 680b54850e8c2..33c3ffb5e6a9d 100644 --- a/frontend/src/lib/components/ApiConnectForm.svelte +++ b/frontend/src/lib/components/ApiConnectForm.svelte @@ -24,8 +24,10 @@ let supabaseWizard = false async function isSupabaseAvailable() { - supabaseWizard = - ((await OauthService.listOauthConnects()) ?? {})['supabase_wizard'] != undefined + try { + supabaseWizard = + ((await OauthService.listOauthConnects()) ?? {})['supabase_wizard'] != undefined + } catch (error) {} } async function loadSchema() { if (!resourceTypeInfo) return diff --git a/frontend/src/lib/components/AppConnectInner.svelte b/frontend/src/lib/components/AppConnectInner.svelte index ddf6811fb945f..c6e4b1983ff94 100644 --- a/frontend/src/lib/components/AppConnectInner.svelte +++ b/frontend/src/lib/components/AppConnectInner.svelte @@ -75,7 +75,6 @@ let scopes: string[] = [] let extra_params: [string, string][] = [] - let path: string let description = '' diff --git a/frontend/src/lib/components/FlowBuilder.svelte b/frontend/src/lib/components/FlowBuilder.svelte index 35ff13f46ed6f..a0ff007d1b485 100644 --- a/frontend/src/lib/components/FlowBuilder.svelte +++ b/frontend/src/lib/components/FlowBuilder.svelte @@ -481,7 +481,14 @@ const selectedIdStore = writable(selectedId ?? 'settings-metadata') const selectedTriggerStore = writable< - 'webhooks' | 'emails' | 'schedules' | 'cli' | 'routes' | 'websockets' | 'scheduledPoll' + | 'webhooks' + | 'emails' + | 'schedules' + | 'cli' + | 'routes' + | 'websockets' + | 'postgres' + | 'scheduledPoll' >('webhooks') export function getSelectedId() { @@ -516,6 +523,7 @@ | 'cli' | 'routes' | 'websockets' + | 'postgres' | 'scheduledPoll' ) { selectedTriggerStore.set(selectedTrigger) diff --git a/frontend/src/lib/components/Path.svelte b/frontend/src/lib/components/Path.svelte index f042f17b761a8..c66f29c390d0f 100644 --- a/frontend/src/lib/components/Path.svelte +++ b/frontend/src/lib/components/Path.svelte @@ -16,6 +16,7 @@ VariableService, WebsocketTriggerService, KafkaTriggerService, + PostgresTriggerService, NatsTriggerService } from '$lib/gen' import { superadmin, userStore, workspaceStore } from '$lib/stores' @@ -40,6 +41,7 @@ | 'http_trigger' | 'websocket_trigger' | 'kafka_trigger' + | 'postgres_trigger' | 'nats_trigger' let meta: Meta | undefined = undefined export let fullNamePlaceholder: string | undefined = undefined @@ -234,6 +236,11 @@ workspace: $workspaceStore!, path: path }) + } else if (kind == 'postgres_trigger') { + return await PostgresTriggerService.existsPostgresTrigger({ + workspace: $workspaceStore!, + path: path + }) } else if (kind == 'nats_trigger') { return await NatsTriggerService.existsNatsTrigger({ workspace: $workspaceStore!, diff --git a/frontend/src/lib/components/ScriptBuilder.svelte b/frontend/src/lib/components/ScriptBuilder.svelte index 9679e3866e574..7722cce4614ff 100644 --- a/frontend/src/lib/components/ScriptBuilder.svelte +++ b/frontend/src/lib/components/ScriptBuilder.svelte @@ -6,10 +6,12 @@ type NewScriptWithDraft, ScheduleService, type Script, - type TriggersCount + type TriggersCount, + PostgresTriggerService } from '$lib/gen' import { 
inferArgs } from '$lib/infer' import { initialCode } from '$lib/script_helpers' + import { page } from '$app/stores' import { defaultScripts, enterpriseLicense, userStore, workspaceStore } from '$lib/stores' import { cleanValueProperties, @@ -230,18 +232,40 @@ $: !disableHistoryChange && replaceStateFn('#' + encodeState({ ...script, primarySchedule: $primaryScheduleStore })) - if (script.content == '') { initContent(script.language, script.kind, template) } - function initContent( + async function isTemplateScript() { + let getInitBlockTemplate = $page.url.searchParams.get('id') + if (getInitBlockTemplate === null) { + return undefined + } + try { + getInitBlockTemplate = await PostgresTriggerService.getTemplateScript({ + workspace: $workspaceStore!, + id: getInitBlockTemplate as string + }) + return getInitBlockTemplate + } catch (error) { + sendUserToast( + 'An error occured when trying to load your template script, please try again later', + true + ) + } + } + + async function initContent( language: SupportedLanguage, kind: Script['kind'] | undefined, template: 'pgsql' | 'mysql' | 'script' | 'docker' | 'powershell' | 'bunnative' ) { scriptEditor?.disableCollaboration() - script.content = initialCode(language, kind, template) + const templateScript = await isTemplateScript() + script.content = initialCode(language, kind, template, templateScript != undefined) + if (templateScript) { + script.content += '\r\n' + templateScript + } scriptEditor?.inferSchema(script.content, language, true) if (script.content != editor?.getCode()) { setCode(script.content) diff --git a/frontend/src/lib/components/ScriptPicker.svelte b/frontend/src/lib/components/ScriptPicker.svelte index 80ecbe54728bf..c55a56de66dcd 100644 --- a/frontend/src/lib/components/ScriptPicker.svelte +++ b/frontend/src/lib/components/ScriptPicker.svelte @@ -48,7 +48,10 @@ })) } else if (itemKind == 'script') { items = ( - await ScriptService.listScripts({ workspace: $workspaceStore!, kinds: kinds.join(',') }) + await ScriptService.listScripts({ + workspace: $workspaceStore!, + kinds: kinds.join(','), + }) ).map((script) => ({ value: script.path, label: `${script.path}${script.summary ? 
` | ${truncate(script.summary, 20)}` : ''}` diff --git a/frontend/src/lib/components/Section.svelte b/frontend/src/lib/components/Section.svelte index 5daa694837cfe..4f36a96b0b4cf 100644 --- a/frontend/src/lib/components/Section.svelte +++ b/frontend/src/lib/components/Section.svelte @@ -6,6 +6,7 @@ export let label: string | undefined = undefined export let tooltip: string | undefined = undefined + export let documentationLink: string | undefined = undefined export let eeOnly = false export let small: boolean = false @@ -38,7 +39,7 @@ {#if tooltip} - {tooltip} + {tooltip} {/if} {#if eeOnly} {#if !$enterpriseLicense} diff --git a/frontend/src/lib/components/details/DetailPageDetailPanel.svelte b/frontend/src/lib/components/details/DetailPageDetailPanel.svelte index 09805297c35b0..1093161d367e3 100644 --- a/frontend/src/lib/components/details/DetailPageDetailPanel.svelte +++ b/frontend/src/lib/components/details/DetailPageDetailPanel.svelte @@ -12,6 +12,7 @@ | 'cli' | 'routes' | 'websockets' + | 'postgres' | 'scheduledPoll' | 'kafka' | 'nats' = 'webhooks' @@ -54,6 +55,7 @@ + diff --git a/frontend/src/lib/components/details/DetailPageLayout.svelte b/frontend/src/lib/components/details/DetailPageLayout.svelte index bce0bda95a462..27c2f46778664 100644 --- a/frontend/src/lib/components/details/DetailPageLayout.svelte +++ b/frontend/src/lib/components/details/DetailPageLayout.svelte @@ -26,6 +26,7 @@ | 'cli' | 'routes' | 'websockets' + | 'postgres' | 'scheduledPoll' | 'kafka' | 'nats' @@ -64,6 +65,7 @@ + @@ -110,6 +112,7 @@ + diff --git a/frontend/src/lib/components/details/DetailPageTriggerPanel.svelte b/frontend/src/lib/components/details/DetailPageTriggerPanel.svelte index 9eb1831cb89cb..c5d4938828bac 100644 --- a/frontend/src/lib/components/details/DetailPageTriggerPanel.svelte +++ b/frontend/src/lib/components/details/DetailPageTriggerPanel.svelte @@ -24,6 +24,7 @@ | 'routes' | 'websockets' | 'kafka' + | 'postgres' | 'nats' | 'scheduledPoll' = 'webhooks' export let simplfiedPoll: boolean = false @@ -66,6 +67,12 @@ Websockets + + + + Postgres + + @@ -97,6 +104,8 @@ {:else if triggerSelected === 'websockets'} + {:else if triggerSelected === 'postgres'} + {:else if triggerSelected === 'kafka' || triggerSelected === 'nats'}

diff --git a/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte b/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte index a38d3659fe8b7..b3994a707e553 100644 --- a/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte +++ b/frontend/src/lib/components/graph/renderers/triggers/TriggersBadge.svelte @@ -1,5 +1,5 @@ + +{#if open} + +{/if} diff --git a/frontend/src/lib/components/triggers/postgres/PostgresTriggerEditorInner.svelte b/frontend/src/lib/components/triggers/postgres/PostgresTriggerEditorInner.svelte new file mode 100644 index 0000000000000..29bbbf0acd519 --- /dev/null +++ b/frontend/src/lib/components/triggers/postgres/PostgresTriggerEditorInner.svelte @@ -0,0 +1,596 @@ + + + + + + {#if !drawerLoading && can_write} + {#if edit} +
+ { + await PostgresTriggerService.setPostgresTriggerEnabled({ + path: initialPath, + workspace: $workspaceStore ?? '', + requestBody: { enabled: e.detail } + }) + sendUserToast( + `${e.detail ? 'enabled' : 'disabled'} postgres trigger ${initialPath}` + ) + }} + /> +
+ {/if} + + {/if} +
+ {#if drawerLoading} + + {:else} +
+ + {#if edit} + Changes can take up to 30 seconds to take effect. + {:else} + New postgres triggers can take up to 30 seconds to start listening. + {/if} + +
+
+
+ +
+ +
+

+ Pick a database to connect to +

+
+ + {#if postgres_resource_path} + + {#if config.show} + + {#if config.isLogical} + Your database is correctly configured with logical replication enabled. You can + proceed with using the streaming feature + {:else} + Logical replication is not enabled on your database. To use this feature, your + Postgres database must have wal_level configured as 'logical' in your + database configuration. + {/if} + + {/if} + {/if} +
+
+
+

+ Pick a script or flow to be triggered +

+
+ + + {#if script_path === undefined && is_flow === false} +
+ +
+ {/if} +
+
+ {#if postgres_resource_path} +
+
+

+ Choose which table of your database to track as well as what kind of transaction + should fire the script.
+ You must pick a database resource first to make the configuration of your trigger + +

+
+

+ Choose the types of database transactions that should trigger a script or flow. + You can select from Insert, Update, + Delete, or any combination of these operations to define when the + trigger should activate. +

+ +
+
+

+ Select the tables to track. You can choose to track + all tables in your database, + all tables within a specific schema, + specific tables in a schema, or even + specific columns of a table. Additionally, you can apply a + filter to retrieve only rows that do not match the specified criteria. +

+ +
Basic

Choose the relations to track without worrying about the + underlying mechanics of creating a + publication + or slot. This simplified option lets you focus only on + the data you want to monitor.

+
Advanced

Select a specific publication from your database to + track, and manage it by creating, + updating, or deleting. For + slots, you can create or + delete + them. Both non-active slots and the + currently used slot by the trigger will be retrieved from + your database for management.

+ +
+ + + + +
+
+ { + replication_slot_name = '' + }} + > + + + + {#if selectedSlotAction === 'create'} +
+ + +
+ {:else} + + {/if} +
+
+ +
+
+ { + if (selectedPublicationAction === 'create') { + selectedTable = 'specific' + publication_name = `windmill_publication_${random_adj()}` + relations = [{ schema_name: 'public', table_to_track: [] }] + return + } + + publication_name = '' + relations = [] + transaction_to_track = [] + }} + > + + + + {#if selectedPublicationAction === 'create'} +
+ + +
+ {:else} + + {/if} + +
+
+
+
+
+
+
+
+
+ {/if} +
+ {/if} +
+
diff --git a/frontend/src/lib/components/triggers/postgres/PostgresTriggersPanel.svelte b/frontend/src/lib/components/triggers/postgres/PostgresTriggersPanel.svelte new file mode 100644 index 0000000000000..ffc8b4e45e176 --- /dev/null +++ b/frontend/src/lib/components/triggers/postgres/PostgresTriggersPanel.svelte @@ -0,0 +1,103 @@ + + + { + loadTriggers() + }} + bind:this={postgresTriggerEditor} +/> + +
+ {#if !newItem} + {#if isCloudHosted()} + + Postgres triggers are disabled in the multi-tenant cloud. + + {:else if $userStore?.is_admin || $userStore?.is_super_admin} + + {:else} + + {/if} + {/if} + + {#if databaseTriggers} + {#if databaseTriggers.length == 0} +
No Postgres triggers
+ {:else} +
+ {#each databaseTriggers as databaseTriggers (databaseTriggers.path)} +
+
{databaseTriggers.path}
+
+ +
+
+ {/each} +
+ {/if} + {:else} + + {/if} + + {#if newItem} + + Deploy the {isFlow ? 'flow' : 'script'} to add Postgres triggers. + + {/if} +
diff --git a/frontend/src/lib/components/triggers/postgres/PublicationPicker.svelte b/frontend/src/lib/components/triggers/postgres/PublicationPicker.svelte new file mode 100644 index 0000000000000..86f035f691d5f --- /dev/null +++ b/frontend/src/lib/components/triggers/postgres/PublicationPicker.svelte @@ -0,0 +1,132 @@ + + + + +
+ + + {#each v.table_to_track as table_to_track, j} +
+
+ + + + + +
+
+ {/each} + +
+ +
+ {/each} + {/if} +
+ +
+ {/if} + diff --git a/frontend/src/lib/components/triggers/postgres/SlotPicker.svelte b/frontend/src/lib/components/triggers/postgres/SlotPicker.svelte new file mode 100644 index 0000000000000..b0a22b5907078 --- /dev/null +++ b/frontend/src/lib/components/triggers/postgres/SlotPicker.svelte @@ -0,0 +1,98 @@ + + + + +
+ +
+
Filter by path of
+ + + + +
+ + +
+ {#if $userStore?.is_super_admin && $userStore.username.includes('@')} + + {:else if $userStore?.is_admin || $userStore?.is_super_admin} + + {/if} +
+
+ {#if loading} + {#each new Array(6) as _} + + {/each} + {:else if !triggers?.length} +
No postgres triggers
+ {:else if items?.length} +
+ {#each items.slice(0, nbDisplayed) as { postgres_resource_path, path, edited_by, error, edited_at, script_path, is_flow, extra_perms, canWrite, enabled, server_id } (path)} + {@const href = `${is_flow ? '/flows/get' : '/scripts/get'}/${script_path}`} + {@const ping = new Date()} + {@const pinging = ping && ping.getTime() > new Date().getTime() - 15 * 1000} + +
+
+ + + postgresTriggerEditor?.openEdit(path, is_flow)} + class="min-w-0 grow hover:underline decoration-gray-400" + > +
+ {path} +
+
+ {postgres_resource_path} +
+
+ runnable: {script_path} +
+
+ + + +
+ {#if (enabled && (!pinging || error)) || (!enabled && error) || (enabled && !server_id)} + + + + + +
+ {#if enabled} + {#if !server_id} + Postgres trigger is starting... + {:else} + Could not connect to database{error ? ': ' + error : ''} + {/if} + {:else} + Disabled because of an error: {error} + {/if} +
+
+ {:else if enabled} + + + + +
+ Connected to database{!server_id ? ' (shutting down...)' : ''}
+
+ {/if} +
+ + { + setTriggerEnabled(path, e.detail) + }} + /> + +
+ + { + goto(href) + } + }, + { + displayName: 'Delete', + type: 'delete', + icon: Trash, + disabled: !canWrite, + action: async () => { + await PostgresTriggerService.deletePostgresTrigger({ + workspace: $workspaceStore ?? '', + path + }) + loadTriggers() + } + }, + { + displayName: canWrite ? 'Edit' : 'View', + icon: canWrite ? Pen : Eye, + action: () => { + postgresTriggerEditor?.openEdit(path, is_flow) + } + }, + { + displayName: 'Audit logs', + icon: Eye, + href: `${base}/audit_logs?resource=${path}` + }, + { + displayName: canWrite ? 'Share' : 'See Permissions', + icon: Share, + action: () => { + shareModal.openDrawer(path, 'websocket_trigger') + } + } + ]} + /> +
+
+
+
edited by {edited_by}
the {displayDate(edited_at)}
+
+ {/each} +
+ {:else} + + {/if} + + {#if items && items?.length > 15 && nbDisplayed < items.length} + {nbDisplayed} items out of {items.length} + + {/if} + + + { + loadTriggers() + }} +/> diff --git a/frontend/src/routes/(root)/(logged)/routes/+page.svelte b/frontend/src/routes/(root)/(logged)/routes/+page.svelte index 2aae39245d32c..6666df29ec51e 100644 --- a/frontend/src/routes/(root)/(logged)/routes/+page.svelte +++ b/frontend/src/routes/(root)/(logged)/routes/+page.svelte @@ -6,7 +6,6 @@ import { Button, Skeleton } from '$lib/components/common' import Dropdown from '$lib/components/DropdownV2.svelte' import PageHeader from '$lib/components/PageHeader.svelte' - import RouteEditor from '$lib/components/triggers/RouteEditor.svelte' import SharedBadge from '$lib/components/SharedBadge.svelte' import ShareModal from '$lib/components/ShareModal.svelte' import Toggle from '$lib/components/Toggle.svelte' @@ -21,6 +20,7 @@ import ToggleButton from '$lib/components/common/toggleButton-v2/ToggleButton.svelte' import { setQuery } from '$lib/navigation' import { onMount } from 'svelte' + import RouteEditor from '$lib/components/triggers/http/RouteEditor.svelte' type TriggerW = HttpTrigger & { canWrite: boolean } diff --git a/frontend/src/routes/(root)/(logged)/scripts/get/[...hash]/+page.svelte b/frontend/src/routes/(root)/(logged)/scripts/get/[...hash]/+page.svelte index 113dda4b99cc5..adb7e732033f2 100644 --- a/frontend/src/routes/(root)/(logged)/scripts/get/[...hash]/+page.svelte +++ b/frontend/src/routes/(root)/(logged)/scripts/get/[...hash]/+page.svelte @@ -18,7 +18,12 @@ } from '$lib/utils' import Tooltip from '$lib/components/Tooltip.svelte' import ShareModal from '$lib/components/ShareModal.svelte' - import { enterpriseLicense, hubBaseUrlStore, userStore, workspaceStore } from '$lib/stores' + import { + enterpriseLicense, + hubBaseUrlStore, + userStore, + workspaceStore + } from '$lib/stores' import { isDeployable, ALL_DEPLOYABLE } from '$lib/utils_deployable' import { onDestroy } from 'svelte' @@ -42,7 +47,7 @@ import DeployWorkspaceDrawer from '$lib/components/DeployWorkspaceDrawer.svelte' import SavedInputs from '$lib/components/SavedInputs.svelte' - import WebhooksPanel from '$lib/components/triggers/WebhooksPanel.svelte' + import WebhooksPanel from '$lib/components/triggers/webhook/WebhooksPanel.svelte' import DetailPageLayout from '$lib/components/details/DetailPageLayout.svelte' import DetailPageHeader from '$lib/components/details/DetailPageHeader.svelte' import CliHelpBox from '$lib/components/CliHelpBox.svelte' @@ -78,14 +83,15 @@ import EmailTriggerPanel from '$lib/components/details/EmailTriggerPanel.svelte' import Star from '$lib/components/Star.svelte' import LogViewer from '$lib/components/LogViewer.svelte' - import RoutesPanel from '$lib/components/triggers/RoutesPanel.svelte' + import RoutesPanel from '$lib/components/triggers/http/RoutesPanel.svelte' import { Highlight } from 'svelte-highlight' import json from 'svelte-highlight/languages/json' import { writable } from 'svelte/store' import TriggersBadge from '$lib/components/graph/renderers/triggers/TriggersBadge.svelte' - import WebsocketTriggersPanel from '$lib/components/triggers/WebsocketTriggersPanel.svelte' - import KafkaTriggersPanel from '$lib/components/triggers/KafkaTriggersPanel.svelte' - import NatsTriggersPanel from '$lib/components/triggers/NatsTriggersPanel.svelte' + import WebsocketTriggersPanel from '$lib/components/triggers/websocket/WebsocketTriggersPanel.svelte' + import KafkaTriggersPanel from 
'$lib/components/triggers/kafka/KafkaTriggersPanel.svelte' + import NatsTriggersPanel from '$lib/components/triggers/nats/NatsTriggersPanel.svelte' + import PostgresTriggersPanel from '$lib/components/triggers/postgres/PostgresTriggersPanel.svelte' let script: Script | undefined let topHash: string | undefined @@ -728,6 +734,10 @@ + +
+ +
diff --git a/frontend/src/routes/(root)/(logged)/websocket_triggers/+page.svelte b/frontend/src/routes/(root)/(logged)/websocket_triggers/+page.svelte index 2009ca3b8f6a3..76d2a34da3895 100644 --- a/frontend/src/routes/(root)/(logged)/websocket_triggers/+page.svelte +++ b/frontend/src/routes/(root)/(logged)/websocket_triggers/+page.svelte @@ -26,9 +26,9 @@ import ToggleButton from '$lib/components/common/toggleButton-v2/ToggleButton.svelte' import { setQuery } from '$lib/navigation' import { onDestroy, onMount } from 'svelte' - import WebsocketTriggerEditor from '$lib/components/triggers/WebsocketTriggerEditor.svelte' import Popover from '$lib/components/Popover.svelte' import { isCloudHosted } from '$lib/cloud' + import WebsocketTriggerEditor from '$lib/components/triggers/websocket/WebsocketTriggerEditor.svelte' type TriggerW = WebsocketTrigger & { canWrite: boolean } @@ -233,7 +233,7 @@
Filter by path of
- +