From d043808aedd7213c8dbaf6055a64c6ca4b44e3ac Mon Sep 17 00:00:00 2001 From: Abel Lucas Date: Fri, 24 Jan 2025 08:19:12 +0100 Subject: [PATCH] backend: use v2 tables through views where possible (v2 phase 3) --- ...7e3244ebb2cb864828637e7ff7476a4a4939b.json | 22 + ...de74e8b0943d30481465da453942e299a128f.json | 15 + ...b3019bcc501362fda2ed35dd3037c3e8a6eb7.json | 40 ++ ...c540e129eb27f869431bcf8182585c8ba699c.json | 15 + ...2878e9f2a13e0ce0dad42723f95ac7fb15d4b.json | 23 + ...4210d39f405016ffec374e4b15a2528baccb5.json | 198 +++++++ ...d52dbd4c922436edacced18b9620c70e0cc8b.json | 24 + ...ea9916bced29ad1b993ce950c586c11df5a1d.json | 33 ++ ...174646b6bdd658b8461db1bb90a871d076718.json | 15 + ...14d3a73947cf4b365b972901fc5814fc31272.json | 15 + ...fdce4ad1a487afcd62735ef25386586cfc036.json | 15 + ...fb98f871164756528f6ec2a90f3694e650165.json | 14 + ...bb0c08ae6d91b05b2f3f7c89a990d1d5a5f8a.json | 22 + ...57b50b781da41cb6f952beafabd042da11fc3.json | 29 ++ ...b65cdcd277257192ee0a6d18a00f41bce49d4.json | 22 + ...8fed0c55ffee22d7eecfcc7e2b4f470448597.json | 23 + ...34930a56ea3de29c0f279e9480434336f4914.json | 15 + ...b281a9bd7f5431c646c6268513751fff95395.json | 29 ++ ...c5b65a2a862adafcca3769eb5a13e08d2805b.json | 15 + ...37136fd123f80389abe6ebbcf12084da45868.json | 14 + ...1807b5e3415c28aacbf09cf7bac58bb1d8470.json | 22 + ...b40b2a9574f548e3ee60bb08926b017f6d657.json | 92 ++++ ...79aecc2361134a4817c2b9580f2680425352d.json | 23 + ...da6d734919a95c109822c30f4d49691b3c6b8.json | 15 + ...8cffbcc3334bada244a331a0bd8db06029d42.json | 22 + ...0e6b77e24bf5a9044fdc5284d0d7f1e14eafa.json | 15 + ...662a9c3c16db9f328cf45eace40cd73c1acaa.json | 16 + ...fe96ff91951fc8486d1f0f5733b8e63f043bc.json | 14 + ...59d0e223b98f3ca84fdff21b1bc57c2ca3512.json | 14 + ...93338faeba38b788245af20f815de4125fc4e.json | 17 + ...e360e389da8c7f653ef8e6bc0d30c823aea51.json | 24 + ...2fbac2fead0e1bff4e909fd7fb1a41bc35d8f.json | 41 ++ ...6b2fe3f690da7bc8dfd36b47ec619c4e31995.json | 54 ++ 
...030eb195900929a10632c36f7b03b6bb212a4.json | 16 + ...543252891c53351bdc98da66cc30ffc895866.json | 22 + ...0f9d3de633cdc4f9e23c6d9bd27eba07b0cb2.json | 22 + ...6c876750c85075cfda816c025c805d4c3cd4c.json | 42 ++ ...b5309320e484359a8b73bc6b3b2e9bc2fd651.json | 16 + ...0ed30e7d404befdd9d556bdc9cde3ab6f790f.json | 16 + ...8cd0e58c8d534ac22ec0356d82a854b31d087.json | 15 + ...06caccb849db9e0f71d86da655b01c6a3e8d0.json | 27 + ...d35d6d711b05d6eefffdc8e6a1b9c9bc7085d.json | 34 ++ ...f867595813bfa1ae0b26bc4181780801294bf.json | 22 + ...291b3294a6a7d1549d752f14acf5972552ba5.json | 15 + ...5a383f8a20e98b8c502929f4dc5041a55e72f.json | 24 + ...7e1ee949cf9108239032cb3addbf350fb33de.json | 67 +++ ...3709851edd9db133494f2ff07e40c020f4be1.json | 75 +++ ...67d88ddbdfe81237b72d5efbc630278e44bb5.json | 23 + ...9008b382c445476117a2311030734bc6d6d53.json | 14 + ...852b49429f42e394aeec9fb82c53f8bcadc54.json | 34 ++ ...ca10b275653e10ea4aa17a8ef5091ca09294a.json | 29 ++ ...1436fb133f0b3c97abb1267a9c12326dd1a33.json | 30 ++ ...73917ed13269f2ee20b86df79fca2c8efe672.json | 90 ++++ ...4865c60310ef666fc1170d64df50548dddf7b.json | 23 + ...cba3a2bc575dc69f6aa01fc6df1b5d095ab41.json | 14 + ...0de3797d671a7c0395d16eec8a2f84c1fe3d1.json | 14 + ...237081246a2985eec1d0d933dfedf634a7191.json | 28 + ...92ef0ad30e39eec59c27bae8cb0622062c8fb.json | 20 + ...0d6109d2a6633e62ce708bad8af1a9f8c3925.json | 15 + ...122cfeeeebf5a9392c6798a486274d8d233d5.json | 24 + ...d539d8fac138f409da7f712fcba0b21c572ab.json | 17 + ...82f4623c74e61759dda257bd1f633f1a2b725.json | 23 + ...2ebd56e13193395701e26328a243055bee6b8.json | 22 + ...d7634fb3aee2af76eb9c7a2abe238159c3e22.json | 15 + ...e4e429ebce64a4d5c16b0f136142ad213cdb1.json | 22 + ...52c0359cc618af85b5d86ff8e36ae381d5c69.json | 29 ++ ...1480681a623eba0ca97d690c50f1c4cd1613b.json | 25 + ...d294361808a1f8d45885ea772ae553d540f7c.json | 16 + ...35270cf56aa9bf7af16b198659d638a4d86eb.json | 15 + ...68b623a2fb015e6cc1baa4e3b974a8bc458f6.json | 69 +++ 
...54642d877178ce2c0e73b72c3824135ef86f4.json | 14 + ...0a69f911d47d7c71c6021112900f7fd560c85.json | 22 + ...eca212d91bb79365a117d754f1d47d8f1ba23.json | 14 + ...0fa742351c5c180ac3de106df54f7badb494c.json | 22 + ...9cbf0df8221a7734a0d97803e30071cc87566.json | 16 + ...026d4d4e0d381b31fe02da7d2667c0cdc1a85.json | 15 + ...2f3aa0e058bf353fee1b0755a798e00061ccf.json | 25 + ...d6b490628f502ed9f9de75cc78d0fe196f260.json | 23 + ...67aa29e2f0fdac3f1984550484d5a06a6ea21.json | 29 ++ ...d437d1e32d4b5bdc7f7fae22e804428660836.json | 25 + ...488bee8b3e3fc8ee25a513c10b1376d69f81a.json | 15 + ...5af4f372c07263c6f86bc89b0d25cb573fa80.json | 14 + ...850e02ffbef25fbfd29cbedc041b0e439e580.json | 29 ++ ...94304872cf2136b15076f6712a77d12a648f1.json | 14 + ...df7de61fa9953998e3dc66af0e5f7491a65f4.json | 14 + ...aa669a153ab0c0bb2807cd4f7fd405afa6f69.json | 73 +++ ...db4007fb9dc6fd20582a46ebb951fca3a7abd.json | 22 + ...1ea1747ffe28986055c01a4661628e47c0e51.json | 16 + ...02be4b9550b043240b2076e105976fedb789c.json | 25 + ...277948c1f579ef0f6419a113f38ee71048077.json | 28 + ...87095542490fbb4aae30ec7fa75c2dae98ec8.json | 15 + ...0c36c3726b2da9ff3592589ac7e83df1c537c.json | 29 ++ ...2a540f1d147948e1b1a7523b21151ffa22305.json | 38 ++ ...574fc2cbcb32fef9f44632a8bc4c5c487fc4f.json | 24 + ...572d971d5892b7dd4c8e824b953113bd6c4a9.json | 23 + ...bb707637a9bb1475711f9f9f3f2a6e28cb169.json | 15 + ...42119537374a815c1292fac4c5b75bdd58898.json | 16 + ...c38fc64deb1226aab9dc3bc4465324fce37d1.json | 16 + ...d85b7b8667c1ae76c4c7bd2db52dc5a8c7778.json | 15 + ...6f8377ec190669e2e22f8d511871d6fbe07b8.json | 38 ++ ...42788dd8a777d643d46d5086b5f8f33bbc97e.json | 12 + ...746692c1112f524727fcc0d56479adca15011.json | 17 + ...e754a0d64bc934a2b6434b9cb67f28d3ef950.json | 16 + ...e5a3d1a91ea02de7608676575e1c03023ed71.json | 29 ++ ...f93697ee1edcee0acf4a0684a28ff66ef735a.json | 48 ++ ...8b22231dab9c558723cc54894597ce4cd3d5a.json | 42 ++ ...b2d2deae208d7e95dd7d39884b7058a0512ff.json | 15 + 
...7b8d9ae95c194b201bd1ce1d8f90925d7dba8.json | 16 + ...e18e8dfbdb0bf1dd4c0dc07a55f4eeb4eb5f8.json | 24 + ...b9e59aba53d8e17f1d8a887128b7863c3924d.json | 14 + ...492e706159b2bc00da9758cccd3c4bcf2a918.json | 14 + ...2529d57faf5e77f6792d5bda608ff9658d7c9.json | 24 + ...5188d40b02b73c12a7905d59edaefc30e8872.json | 15 + ...b534f885da45c18866731676d5b797d385ce1.json | 23 + ...f1bfbe4e2db85da9a478a07460733999e9beb.json | 20 + ...676407f78dcf26f38c93e6097854561e5e2c0.json | 68 +++ ...cc151604feae83d4671dfb9ca77eb8fd8d4f1.json | 14 + ...41b7852d809ee6b2b39581cfc9ea1bff82c2d.json | 15 + ...be64c1244f0382e7a7f81008f6d1f37d408fb.json | 25 + ...43199a18885e1739a5a0e7f6100eab6f3c803.json | 14 + ...4c577ce2a41c932dc285ddd6e82a732662e1c.json | 15 + ...c4cc6a3648992b590c6e0121e4d7434da516b.json | 16 + ...5ef756b8e5c1955fbe111df9ee171dc262338.json | 84 +++ ...675393a6bb27f8e14245410751961218a7df5.json | 30 ++ ...307daeb0c095e07063c57b26ab94211bf6359.json | 23 + ...ed2d4534e08471a60005bff85b952874c54c2.json | 71 +++ ...1555d6e088cb981b230d1cbbf8e8b832cad43.json | 15 + ...212a5bd4039b57fab20b163617e33a4c9dd46.json | 14 + ...1fef4daa669ec43ca3f457cc2c473b8e30b08.json | 30 ++ ...58c1aca73c0d750f589facccfcf73f9520861.json | 15 + ...41a254e00e84030aefff91a7b420fe7513b0d.json | 24 + ...4ed90a18a3c952783c419707ffbb38e232ce0.json | 26 + ...f149ae8c04beb9fcbff60c5ee65cf71b44927.json | 16 + ...06c6736556f0a14b489d6708f9879393f9ea3.json | 28 + ...2c80d00f52a3fc32bf32271ebc90f7837abda.json | 15 + ...d0b6ceae9de456d3ddb6008707589dbe30747.json | 22 + ...92b05105c261976cce78c77d7f85abf9ad40c.json | 14 + ...1f3615f4439c090a75e6fde03702a21589b25.json | 23 + ...4f50c4aec75759cb928bba9e2e6a3a5b06edd.json | 17 + ...5eadedad1a29049f8023e90deb57b19a41c72.json | 22 + ...6e1d802dd5d82e1cd5f5ed3a15cbc75eb6745.json | 23 + ...4e5d414cf439e4db3325b5bf6bbeb86c5bd2a.json | 15 + ...45c83f8b069a52f681f14afb4931ac77baf45.json | 40 ++ ...bd41c7b823186ac056a0a676da85dc5d9a027.json | 24 + 
...cf967033942e489831cb5911723af1ec5161e.json | 22 + ...2b7593cb8eed2a97411697819849958c022b3.json | 23 + ...aeca87ed9764bfa683b9d49294c24495ad392.json | 22 + ...1eacb8fdccab2bd7dec3fcc5c08f7685734d2.json | 16 + ...7c5efa273188771db61fb0632c88e8b5c8558.json | 15 + ...fff455f3191f152e0900e09b71abb90d074ab.json | 29 ++ ...a1aea7f410ef204ec3465f4fb6c9acd256c95.json | 15 + ...39d688c979cff8c0bdd02e4ec91891bf0b3a6.json | 25 + ...8c0213ae4f66fdcc3a486120bb5116b1ce086.json | 14 + ...ec4dd12900340d7ffcc076a4f969e0b73c4a8.json | 40 ++ ...5a7c20cf22ca7b31334a173bafafa760e1a78.json | 15 + ...42ab25883b53a81ebb581fe019af2ec5eb567.json | 58 +++ ...634b90631242e3e3991659a785ff1256d5f4d.json | 22 + ...7bcffe58dfc26ad4b6ad56390105fded5034f.json | 17 + ...5590d5dea8c67f6ae7b14743ac4f265dd89a3.json | 14 + ...17c9209817a9cd8eb3bcd05a74f55423054ae.json | 30 ++ ...90850047653549ee20870e6272cb7d4cfb9aa.json | 23 + ...bbd7ec86610ddc8589bf5b606fab0947c8b75.json | 91 ++++ ...de9a730290525862e4ac27dccfdc51ee10093.json | 70 +++ ...42d909456d28a98563923cca43e760862e5e0.json | 24 + ...942d30ecbf3f7097cb25f5ad5841756e3e61e.json | 22 + ...f587c9581ef407417ec979b8a3ab07816cc69.json | 16 + ...f8268962b79a6884d7011fd02fd4809eede10.json | 16 + ...b800b8efd647121b538deb023f96dbaac3715.json | 22 + ...77afbd8b3a660b3be27514b517c077c63c238.json | 84 +++ backend/ee-repo-ref.txt | 2 +- ...45630_v2_queue_compatibility_view.down.sql | 2 + ...7145630_v2_queue_compatibility_view.up.sql | 50 ++ ..._completed_job_compatibility_view.down.sql | 2 + ...v2_completed_job_compatibility_view.up.sql | 39 ++ backend/src/monitor.rs | 67 ++- backend/tests/fixtures/base.sql | 4 +- backend/tests/fixtures/result_format.sql | 6 + backend/tests/worker.rs | 51 +- backend/windmill-api/src/apps.rs | 4 +- .../windmill-api/src/concurrency_groups.rs | 8 +- backend/windmill-api/src/flows.rs | 8 +- backend/windmill-api/src/inputs.rs | 34 +- backend/windmill-api/src/job_metrics.rs | 2 +- backend/windmill-api/src/jobs.rs | 295 
++++++----- backend/windmill-api/src/resources.rs | 4 +- backend/windmill-api/src/schedule.rs | 23 +- backend/windmill-api/src/slack_approvals.rs | 22 +- backend/windmill-api/src/users.rs | 61 +-- .../windmill-api/src/websocket_triggers.rs | 2 +- backend/windmill-api/src/workspaces_extra.rs | 20 +- backend/windmill-common/src/bench.rs | 61 ++- backend/windmill-common/src/cache.rs | 2 +- backend/windmill-common/src/jobs.rs | 8 +- backend/windmill-common/src/queue.rs | 2 +- backend/windmill-common/src/worker.rs | 102 ++-- backend/windmill-queue/src/jobs.rs | 491 ++++++++---------- backend/windmill-queue/src/schedule.rs | 2 +- backend/windmill-worker/src/common.rs | 4 +- backend/windmill-worker/src/handle_child.rs | 30 +- .../windmill-worker/src/python_executor.rs | 47 +- backend/windmill-worker/src/worker.rs | 24 +- backend/windmill-worker/src/worker_flow.rs | 386 +++++++------- .../windmill-worker/src/worker_lockfiles.rs | 4 +- 203 files changed, 5525 insertions(+), 851 deletions(-) create mode 100644 backend/.sqlx/query-00b6340396f5121aff49323d12a7e3244ebb2cb864828637e7ff7476a4a4939b.json create mode 100644 backend/.sqlx/query-00e63eab76d26e148b77e932848de74e8b0943d30481465da453942e299a128f.json create mode 100644 backend/.sqlx/query-01ece1b2ec8ec9a69e0ab21f439b3019bcc501362fda2ed35dd3037c3e8a6eb7.json create mode 100644 backend/.sqlx/query-02b06e7ffde8896c46603d830ccc540e129eb27f869431bcf8182585c8ba699c.json create mode 100644 backend/.sqlx/query-05d6405b2cc6aabf564a10f05402878e9f2a13e0ce0dad42723f95ac7fb15d4b.json create mode 100644 backend/.sqlx/query-099894523449a70eb301ecd1d744210d39f405016ffec374e4b15a2528baccb5.json create mode 100644 backend/.sqlx/query-0bc1c617786bb2fdc71b85442b1d52dbd4c922436edacced18b9620c70e0cc8b.json create mode 100644 backend/.sqlx/query-0c3d481e35e0b4eb72e391b4cffea9916bced29ad1b993ce950c586c11df5a1d.json create mode 100644 backend/.sqlx/query-0ea5ba568ec0f62b808fe938a41174646b6bdd658b8461db1bb90a871d076718.json create mode 100644 
backend/.sqlx/query-0f01cbab131eebc610c0814d97814d3a73947cf4b365b972901fc5814fc31272.json create mode 100644 backend/.sqlx/query-0f23535bdfe222eee5a27d52fe4fdce4ad1a487afcd62735ef25386586cfc036.json create mode 100644 backend/.sqlx/query-11aa92258d927ff46931730df56fb98f871164756528f6ec2a90f3694e650165.json create mode 100644 backend/.sqlx/query-11db65c493990f6935103033b2fbb0c08ae6d91b05b2f3f7c89a990d1d5a5f8a.json create mode 100644 backend/.sqlx/query-1260543e36028c1322c60ce028657b50b781da41cb6f952beafabd042da11fc3.json create mode 100644 backend/.sqlx/query-12828c9b2964f2b484a68de1e01b65cdcd277257192ee0a6d18a00f41bce49d4.json create mode 100644 backend/.sqlx/query-12be3655e9c916598f83ae990c18fed0c55ffee22d7eecfcc7e2b4f470448597.json create mode 100644 backend/.sqlx/query-1959f48940ad2870b7cc67be19f34930a56ea3de29c0f279e9480434336f4914.json create mode 100644 backend/.sqlx/query-19cc8499f682ec34d54bc4f694cb281a9bd7f5431c646c6268513751fff95395.json create mode 100644 backend/.sqlx/query-1a2ee2ac2231e60cd5b862ea7c6c5b65a2a862adafcca3769eb5a13e08d2805b.json create mode 100644 backend/.sqlx/query-1a85ca0a6d0ba5ab3462907e35037136fd123f80389abe6ebbcf12084da45868.json create mode 100644 backend/.sqlx/query-1a9984fa378634f7f12356407831807b5e3415c28aacbf09cf7bac58bb1d8470.json create mode 100644 backend/.sqlx/query-1af2ef2d22d344687756681bd16b40b2a9574f548e3ee60bb08926b017f6d657.json create mode 100644 backend/.sqlx/query-1b58b90c184ca21d777ea4e264c79aecc2361134a4817c2b9580f2680425352d.json create mode 100644 backend/.sqlx/query-1d4718cbb0b5b66b91e8fffb6c0da6d734919a95c109822c30f4d49691b3c6b8.json create mode 100644 backend/.sqlx/query-1d819b829cd92995c39d29540df8cffbcc3334bada244a331a0bd8db06029d42.json create mode 100644 backend/.sqlx/query-1d87f41fd1abb9361d795a899120e6b77e24bf5a9044fdc5284d0d7f1e14eafa.json create mode 100644 backend/.sqlx/query-1ed4fbeafe678ff19100cb3e892662a9c3c16db9f328cf45eace40cd73c1acaa.json create mode 100644 
backend/.sqlx/query-202e580eacc7907bb34b6464a66fe96ff91951fc8486d1f0f5733b8e63f043bc.json create mode 100644 backend/.sqlx/query-205e4faf72177cae685c7f0727659d0e223b98f3ca84fdff21b1bc57c2ca3512.json create mode 100644 backend/.sqlx/query-2104c634340c9289f0130a649b793338faeba38b788245af20f815de4125fc4e.json create mode 100644 backend/.sqlx/query-241270e20c751806dece12fbc2de360e389da8c7f653ef8e6bc0d30c823aea51.json create mode 100644 backend/.sqlx/query-2456fc71fc7a0758a4c1fbe77d72fbac2fead0e1bff4e909fd7fb1a41bc35d8f.json create mode 100644 backend/.sqlx/query-25d05a1e10d1aaa3f7c3c3bea5f6b2fe3f690da7bc8dfd36b47ec619c4e31995.json create mode 100644 backend/.sqlx/query-26bee60d5311b1454d2d14ddf7b030eb195900929a10632c36f7b03b6bb212a4.json create mode 100644 backend/.sqlx/query-280a361076d1c6317610765960f543252891c53351bdc98da66cc30ffc895866.json create mode 100644 backend/.sqlx/query-28c40cfbe796c6fb942cc9f72e00f9d3de633cdc4f9e23c6d9bd27eba07b0cb2.json create mode 100644 backend/.sqlx/query-2c5a1b1ebd872ce8a03a1b7e5246c876750c85075cfda816c025c805d4c3cd4c.json create mode 100644 backend/.sqlx/query-2e66f54a02c66b0e95d62c4a338b5309320e484359a8b73bc6b3b2e9bc2fd651.json create mode 100644 backend/.sqlx/query-2f06fd0d42f41e4773b965933e80ed30e7d404befdd9d556bdc9cde3ab6f790f.json create mode 100644 backend/.sqlx/query-2f2ef9b1ccff527c48fa01cf1b78cd0e58c8d534ac22ec0356d82a854b31d087.json create mode 100644 backend/.sqlx/query-31df83e9eb6078e93ec5fe4168306caccb849db9e0f71d86da655b01c6a3e8d0.json create mode 100644 backend/.sqlx/query-31f6f0712f1f53362628a487e8cd35d6d711b05d6eefffdc8e6a1b9c9bc7085d.json create mode 100644 backend/.sqlx/query-33351de09c72ccc0a39eb977d26f867595813bfa1ae0b26bc4181780801294bf.json create mode 100644 backend/.sqlx/query-4244640e62fffb0f6978f8f7d78291b3294a6a7d1549d752f14acf5972552ba5.json create mode 100644 backend/.sqlx/query-43aa468aac174529a74e6108af55a383f8a20e98b8c502929f4dc5041a55e72f.json create mode 100644 
backend/.sqlx/query-4535c8effd1bae49894d13293a37e1ee949cf9108239032cb3addbf350fb33de.json create mode 100644 backend/.sqlx/query-45affcb4aeabf80d825628002903709851edd9db133494f2ff07e40c020f4be1.json create mode 100644 backend/.sqlx/query-46046b1435d1f01e6f7cafe054367d88ddbdfe81237b72d5efbc630278e44bb5.json create mode 100644 backend/.sqlx/query-464c51a8ea8c06232d33c45b8e59008b382c445476117a2311030734bc6d6d53.json create mode 100644 backend/.sqlx/query-46ac665ac5662bd89cdc92cd84d852b49429f42e394aeec9fb82c53f8bcadc54.json create mode 100644 backend/.sqlx/query-47e6b25cc092ec8718a6581c76aca10b275653e10ea4aa17a8ef5091ca09294a.json create mode 100644 backend/.sqlx/query-47fb4bceddd36fa60ccbe84f6341436fb133f0b3c97abb1267a9c12326dd1a33.json create mode 100644 backend/.sqlx/query-488dd591096b2b47787afdc3a1d73917ed13269f2ee20b86df79fca2c8efe672.json create mode 100644 backend/.sqlx/query-4a26b37d07e8ef22b34c7d94c194865c60310ef666fc1170d64df50548dddf7b.json create mode 100644 backend/.sqlx/query-4c5502cdf49a59ce6d8bdce90e1cba3a2bc575dc69f6aa01fc6df1b5d095ab41.json create mode 100644 backend/.sqlx/query-4d4cc1ed4857edb473dc9397ca90de3797d671a7c0395d16eec8a2f84c1fe3d1.json create mode 100644 backend/.sqlx/query-4eb44e8ea9313646749bb73dfa6237081246a2985eec1d0d933dfedf634a7191.json create mode 100644 backend/.sqlx/query-519f4f76649947f036a2129c11e92ef0ad30e39eec59c27bae8cb0622062c8fb.json create mode 100644 backend/.sqlx/query-51ddbfec67af268d9bbee12b2730d6109d2a6633e62ce708bad8af1a9f8c3925.json create mode 100644 backend/.sqlx/query-55002cccf17e32af5b077a17707122cfeeeebf5a9392c6798a486274d8d233d5.json create mode 100644 backend/.sqlx/query-56d6b1f96680f6963b674c67b85d539d8fac138f409da7f712fcba0b21c572ab.json create mode 100644 backend/.sqlx/query-572492b357bc4e2576d77ed135682f4623c74e61759dda257bd1f633f1a2b725.json create mode 100644 backend/.sqlx/query-58c0eb36b630d5eba9d12edca672ebd56e13193395701e26328a243055bee6b8.json create mode 100644 
backend/.sqlx/query-5962a611bf5823336340fe8d52dd7634fb3aee2af76eb9c7a2abe238159c3e22.json create mode 100644 backend/.sqlx/query-597b148ff09a1e0f369bb04781ee4e429ebce64a4d5c16b0f136142ad213cdb1.json create mode 100644 backend/.sqlx/query-59f58aed612ef6b87b9e86c8ac752c0359cc618af85b5d86ff8e36ae381d5c69.json create mode 100644 backend/.sqlx/query-5bd7425430868a956cf27c442f51480681a623eba0ca97d690c50f1c4cd1613b.json create mode 100644 backend/.sqlx/query-5d1cb6b4dccc6e26c969dce4c5ad294361808a1f8d45885ea772ae553d540f7c.json create mode 100644 backend/.sqlx/query-64bead2fca9b715d8adb23fc44e35270cf56aa9bf7af16b198659d638a4d86eb.json create mode 100644 backend/.sqlx/query-656b99355d9668925a848a5518168b623a2fb015e6cc1baa4e3b974a8bc458f6.json create mode 100644 backend/.sqlx/query-67afe352fc26dda9107c90e50e954642d877178ce2c0e73b72c3824135ef86f4.json create mode 100644 backend/.sqlx/query-689649d4a2d1e73e2616cbb647e0a69f911d47d7c71c6021112900f7fd560c85.json create mode 100644 backend/.sqlx/query-689b9b39c2e51440b428b521a60eca212d91bb79365a117d754f1d47d8f1ba23.json create mode 100644 backend/.sqlx/query-6ab112fa42a9ae332bfa30427b70fa742351c5c180ac3de106df54f7badb494c.json create mode 100644 backend/.sqlx/query-6b4a47300708e7b34a621a8a7fb9cbf0df8221a7734a0d97803e30071cc87566.json create mode 100644 backend/.sqlx/query-6bdb3fcfe16fc40222dc7010a11026d4d4e0d381b31fe02da7d2667c0cdc1a85.json create mode 100644 backend/.sqlx/query-6d5bc1fd92fbc9a97567a697e992f3aa0e058bf353fee1b0755a798e00061ccf.json create mode 100644 backend/.sqlx/query-6daf2aca3e272a1efd7b26b4c80d6b490628f502ed9f9de75cc78d0fe196f260.json create mode 100644 backend/.sqlx/query-6ff7a025f529c077c1b6c9632a367aa29e2f0fdac3f1984550484d5a06a6ea21.json create mode 100644 backend/.sqlx/query-70e740465e648d84d32a506916ed437d1e32d4b5bdc7f7fae22e804428660836.json create mode 100644 backend/.sqlx/query-740eaa62280c83fda48f4505bbf488bee8b3e3fc8ee25a513c10b1376d69f81a.json create mode 100644 
backend/.sqlx/query-7463c1eb3f690239c9277b62d935af4f372c07263c6f86bc89b0d25cb573fa80.json create mode 100644 backend/.sqlx/query-74dbd5a09255c30991078492ba3850e02ffbef25fbfd29cbedc041b0e439e580.json create mode 100644 backend/.sqlx/query-78e6e2bd3e009055812202e763294304872cf2136b15076f6712a77d12a648f1.json create mode 100644 backend/.sqlx/query-798a6bdc8f2fb9421808e084fbddf7de61fa9953998e3dc66af0e5f7491a65f4.json create mode 100644 backend/.sqlx/query-7af1cf089022fc1c3597b270b69aa669a153ab0c0bb2807cd4f7fd405afa6f69.json create mode 100644 backend/.sqlx/query-7b1e6b67a20ae1128118d5f5cc0db4007fb9dc6fd20582a46ebb951fca3a7abd.json create mode 100644 backend/.sqlx/query-7c7234d8234fbaff4d42eb47d1e1ea1747ffe28986055c01a4661628e47c0e51.json create mode 100644 backend/.sqlx/query-7cb0b49f5898b1adbf6579922aa02be4b9550b043240b2076e105976fedb789c.json create mode 100644 backend/.sqlx/query-7cc627bc2823d3124c4d20c1c0b277948c1f579ef0f6419a113f38ee71048077.json create mode 100644 backend/.sqlx/query-7d07a717533bfcaf581f6655bc387095542490fbb4aae30ec7fa75c2dae98ec8.json create mode 100644 backend/.sqlx/query-8123ba05f6e7b9bd395175ee4ec0c36c3726b2da9ff3592589ac7e83df1c537c.json create mode 100644 backend/.sqlx/query-830297547ea33969f96a5c4c2b82a540f1d147948e1b1a7523b21151ffa22305.json create mode 100644 backend/.sqlx/query-8381cee8bec06f8eff847b22d25574fc2cbcb32fef9f44632a8bc4c5c487fc4f.json create mode 100644 backend/.sqlx/query-8440081df15fa7874dded86b8af572d971d5892b7dd4c8e824b953113bd6c4a9.json create mode 100644 backend/.sqlx/query-84ad6f24aa9ccdc6f6e705b7de1bb707637a9bb1475711f9f9f3f2a6e28cb169.json create mode 100644 backend/.sqlx/query-8512298656d6872a2123c2801d842119537374a815c1292fac4c5b75bdd58898.json create mode 100644 backend/.sqlx/query-85705fc3d7f8ba5f1b12d5fb222c38fc64deb1226aab9dc3bc4465324fce37d1.json create mode 100644 backend/.sqlx/query-86446941173c8fd5c672f9a0218d85b7b8667c1ae76c4c7bd2db52dc5a8c7778.json create mode 100644 
backend/.sqlx/query-86cc1e3c18e936a700d8842a51a6f8377ec190669e2e22f8d511871d6fbe07b8.json create mode 100644 backend/.sqlx/query-86e730a3481868a8d63824904df42788dd8a777d643d46d5086b5f8f33bbc97e.json create mode 100644 backend/.sqlx/query-87d1daaa2c2946b1be0ca4509d9746692c1112f524727fcc0d56479adca15011.json create mode 100644 backend/.sqlx/query-902a16d9e7ac34e7f1a0ad633bae754a0d64bc934a2b6434b9cb67f28d3ef950.json create mode 100644 backend/.sqlx/query-90fbb9430ab03ce3aadd95cc263e5a3d1a91ea02de7608676575e1c03023ed71.json create mode 100644 backend/.sqlx/query-94831baa639d7546f98f24847c0f93697ee1edcee0acf4a0684a28ff66ef735a.json create mode 100644 backend/.sqlx/query-95ae90094ec0e2c22660cc2e3788b22231dab9c558723cc54894597ce4cd3d5a.json create mode 100644 backend/.sqlx/query-992ebe3a73d0bd3b2122c08ca41b2d2deae208d7e95dd7d39884b7058a0512ff.json create mode 100644 backend/.sqlx/query-9b2c42327378963b16eec5037537b8d9ae95c194b201bd1ce1d8f90925d7dba8.json create mode 100644 backend/.sqlx/query-9c19ad9ab14325587d662539c04e18e8dfbdb0bf1dd4c0dc07a55f4eeb4eb5f8.json create mode 100644 backend/.sqlx/query-9c7b2d9708a65764d91bf7f8a2cb9e59aba53d8e17f1d8a887128b7863c3924d.json create mode 100644 backend/.sqlx/query-9dd243640439ffc1451cf2bfeb2492e706159b2bc00da9758cccd3c4bcf2a918.json create mode 100644 backend/.sqlx/query-9e7e6fe1dfba032e586f64531e12529d57faf5e77f6792d5bda608ff9658d7c9.json create mode 100644 backend/.sqlx/query-9f38007b605f51615f28aa248af5188d40b02b73c12a7905d59edaefc30e8872.json create mode 100644 backend/.sqlx/query-9fcceac37b2f3407357d495c0e1b534f885da45c18866731676d5b797d385ce1.json create mode 100644 backend/.sqlx/query-a0c35cb515a842067b294343c90f1bfbe4e2db85da9a478a07460733999e9beb.json create mode 100644 backend/.sqlx/query-a10e465cb27f7e29d921ea814dd676407f78dcf26f38c93e6097854561e5e2c0.json create mode 100644 backend/.sqlx/query-a2b7d364705468bd9dfb8f10e0bcc151604feae83d4671dfb9ca77eb8fd8d4f1.json create mode 100644 
backend/.sqlx/query-a39199c8ce9d69784583752774941b7852d809ee6b2b39581cfc9ea1bff82c2d.json create mode 100644 backend/.sqlx/query-a52f64d349d3f27021d0608a710be64c1244f0382e7a7f81008f6d1f37d408fb.json create mode 100644 backend/.sqlx/query-a68754521bf751450602f04dd4243199a18885e1739a5a0e7f6100eab6f3c803.json create mode 100644 backend/.sqlx/query-a6d52684b02d648294c45f563e74c577ce2a41c932dc285ddd6e82a732662e1c.json create mode 100644 backend/.sqlx/query-aaacf51f3a43617b60e4253549ec4cc6a3648992b590c6e0121e4d7434da516b.json create mode 100644 backend/.sqlx/query-ab04cda71f8e2be9acbecabe1ee5ef756b8e5c1955fbe111df9ee171dc262338.json create mode 100644 backend/.sqlx/query-ab9e47e5b510e7df5a41db12896675393a6bb27f8e14245410751961218a7df5.json create mode 100644 backend/.sqlx/query-acc0b67c8e768b524b5cfb309e4307daeb0c095e07063c57b26ab94211bf6359.json create mode 100644 backend/.sqlx/query-b2e4dfaaee713604d0700ea4675ed2d4534e08471a60005bff85b952874c54c2.json create mode 100644 backend/.sqlx/query-b3fd0be8a7ef6d8dd8943aef45f1555d6e088cb981b230d1cbbf8e8b832cad43.json create mode 100644 backend/.sqlx/query-b4a9abcb38997587b28655b0f4a212a5bd4039b57fab20b163617e33a4c9dd46.json create mode 100644 backend/.sqlx/query-b6c146ef8db9b4d22a895853b951fef4daa669ec43ca3f457cc2c473b8e30b08.json create mode 100644 backend/.sqlx/query-b6f4037e559e0540f0c42b292a458c1aca73c0d750f589facccfcf73f9520861.json create mode 100644 backend/.sqlx/query-bcc79dc05e227625bc2ba70350a41a254e00e84030aefff91a7b420fe7513b0d.json create mode 100644 backend/.sqlx/query-bd0ca94343399fa2fa06d9304da4ed90a18a3c952783c419707ffbb38e232ce0.json create mode 100644 backend/.sqlx/query-be01bed8689bf58a7403021f8b6f149ae8c04beb9fcbff60c5ee65cf71b44927.json create mode 100644 backend/.sqlx/query-bff39cc57aba0729ddef1d53f3806c6736556f0a14b489d6708f9879393f9ea3.json create mode 100644 backend/.sqlx/query-c00bae0d8c9bee37cbad4de4cb02c80d00f52a3fc32bf32271ebc90f7837abda.json create mode 100644 
backend/.sqlx/query-c03b4c5878bd0a0c545cb66b146d0b6ceae9de456d3ddb6008707589dbe30747.json create mode 100644 backend/.sqlx/query-c03decd33061ab912c9afed841092b05105c261976cce78c77d7f85abf9ad40c.json create mode 100644 backend/.sqlx/query-c5259e37703c3e48104438bad6e1f3615f4439c090a75e6fde03702a21589b25.json create mode 100644 backend/.sqlx/query-c6cab61c299163f99ebcf0338994f50c4aec75759cb928bba9e2e6a3a5b06edd.json create mode 100644 backend/.sqlx/query-cf85496774cd3ce58543dd0d52b5eadedad1a29049f8023e90deb57b19a41c72.json create mode 100644 backend/.sqlx/query-d25c58d2722ad3dcd91101ce6f66e1d802dd5d82e1cd5f5ed3a15cbc75eb6745.json create mode 100644 backend/.sqlx/query-d2a9e6a31bab0551d32093b1afe4e5d414cf439e4db3325b5bf6bbeb86c5bd2a.json create mode 100644 backend/.sqlx/query-d4d83d8177144c91aa489b5a42a45c83f8b069a52f681f14afb4931ac77baf45.json create mode 100644 backend/.sqlx/query-d6c8f4e49cf7b6db5c979c88e02bd41c7b823186ac056a0a676da85dc5d9a027.json create mode 100644 backend/.sqlx/query-d70ad8045589d746fc224ca950dcf967033942e489831cb5911723af1ec5161e.json create mode 100644 backend/.sqlx/query-d988e91087695742d75946100cf2b7593cb8eed2a97411697819849958c022b3.json create mode 100644 backend/.sqlx/query-d99ae453289ae967ce92a221375aeca87ed9764bfa683b9d49294c24495ad392.json create mode 100644 backend/.sqlx/query-da57f154108469af911579c7e021eacb8fdccab2bd7dec3fcc5c08f7685734d2.json create mode 100644 backend/.sqlx/query-dc1d42c31155a03d6ecc279d08f7c5efa273188771db61fb0632c88e8b5c8558.json create mode 100644 backend/.sqlx/query-dd5432830c1555ecbd6a0e45988fff455f3191f152e0900e09b71abb90d074ab.json create mode 100644 backend/.sqlx/query-defd99dd2427cdc54bb662d1ba3a1aea7f410ef204ec3465f4fb6c9acd256c95.json create mode 100644 backend/.sqlx/query-df43c959e1f20cff2394c6d226439d688c979cff8c0bdd02e4ec91891bf0b3a6.json create mode 100644 backend/.sqlx/query-e01c6ed633560647cfdf24f79298c0213ae4f66fdcc3a486120bb5116b1ce086.json create mode 100644 
backend/.sqlx/query-e47b00656e5a3321fefef0e02cdec4dd12900340d7ffcc076a4f969e0b73c4a8.json create mode 100644 backend/.sqlx/query-e64931144cb58991c13d475cd225a7c20cf22ca7b31334a173bafafa760e1a78.json create mode 100644 backend/.sqlx/query-e653d36b607a16c0dfc0324690942ab25883b53a81ebb581fe019af2ec5eb567.json create mode 100644 backend/.sqlx/query-e67509f23f769854c8ee74677b4634b90631242e3e3991659a785ff1256d5f4d.json create mode 100644 backend/.sqlx/query-e9533284529b19582eadad5c9287bcffe58dfc26ad4b6ad56390105fded5034f.json create mode 100644 backend/.sqlx/query-ecd62c48fe2fba2fc2582e9e7ae5590d5dea8c67f6ae7b14743ac4f265dd89a3.json create mode 100644 backend/.sqlx/query-ef8413620c6860c3bf200894c5917c9209817a9cd8eb3bcd05a74f55423054ae.json create mode 100644 backend/.sqlx/query-f0d3c2641924b1f1026d4dfa19290850047653549ee20870e6272cb7d4cfb9aa.json create mode 100644 backend/.sqlx/query-f0fdeb7aea3e71099e7db0f4343bbd7ec86610ddc8589bf5b606fab0947c8b75.json create mode 100644 backend/.sqlx/query-f2e0bf4cad9c68220f1955af425de9a730290525862e4ac27dccfdc51ee10093.json create mode 100644 backend/.sqlx/query-f3571e1d2b57011e5f6a38725eb42d909456d28a98563923cca43e760862e5e0.json create mode 100644 backend/.sqlx/query-f5d8c9ad5a64a7e2531bc84d26f942d30ecbf3f7097cb25f5ad5841756e3e61e.json create mode 100644 backend/.sqlx/query-f746c5d2ffa24dd75124b5f8d04f587c9581ef407417ec979b8a3ab07816cc69.json create mode 100644 backend/.sqlx/query-fc7c71d4744a2b5d5511a04eb1bf8268962b79a6884d7011fd02fd4809eede10.json create mode 100644 backend/.sqlx/query-fdedd3909a97db5d43d9c46ff77b800b8efd647121b538deb023f96dbaac3715.json create mode 100644 backend/.sqlx/query-ff0403790674cdb07022af71c2377afbd8b3a660b3be27514b517c077c63c238.json create mode 100644 backend/migrations/20250117145630_v2_queue_compatibility_view.down.sql create mode 100644 backend/migrations/20250117145630_v2_queue_compatibility_view.up.sql create mode 100644 
backend/migrations/20250117145631_v2_completed_job_compatibility_view.down.sql create mode 100644 backend/migrations/20250117145631_v2_completed_job_compatibility_view.up.sql diff --git a/backend/.sqlx/query-00b6340396f5121aff49323d12a7e3244ebb2cb864828637e7ff7476a4a4939b.json b/backend/.sqlx/query-00b6340396f5121aff49323d12a7e3244ebb2cb864828637e7ff7476a4a4939b.json new file mode 100644 index 0000000000000..389390524e1e0 --- /dev/null +++ b/backend/.sqlx/query-00b6340396f5121aff49323d12a7e3244ebb2cb864828637e7ff7476a4a4939b.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT runnable_path FROM v2_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "runnable_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "00b6340396f5121aff49323d12a7e3244ebb2cb864828637e7ff7476a4a4939b" +} diff --git a/backend/.sqlx/query-00e63eab76d26e148b77e932848de74e8b0943d30481465da453942e299a128f.json b/backend/.sqlx/query-00e63eab76d26e148b77e932848de74e8b0943d30481465da453942e299a128f.json new file mode 100644 index 0000000000000..9c1f8d00a7d88 --- /dev/null +++ b/backend/.sqlx/query-00e63eab76d26e148b77e932848de74e8b0943d30481465da453942e299a128f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO metrics (id, value)\n VALUES ($1, to_jsonb((\n SELECT EXTRACT(EPOCH FROM now() - scheduled_for)\n FROM v2_job_queue\n WHERE tag = $2 AND running = false AND scheduled_for <= now() - ('3 seconds')::interval\n ORDER BY priority DESC NULLS LAST, scheduled_for LIMIT 1\n )))", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "00e63eab76d26e148b77e932848de74e8b0943d30481465da453942e299a128f" +} diff --git a/backend/.sqlx/query-01ece1b2ec8ec9a69e0ab21f439b3019bcc501362fda2ed35dd3037c3e8a6eb7.json 
b/backend/.sqlx/query-01ece1b2ec8ec9a69e0ab21f439b3019bcc501362fda2ed35dd3037c3e8a6eb7.json new file mode 100644 index 0000000000000..8730c1a5a1f67 --- /dev/null +++ b/backend/.sqlx/query-01ece1b2ec8ec9a69e0ab21f439b3019bcc501362fda2ed35dd3037c3e8a6eb7.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE\n FROM parallel_monitor_lock\n WHERE last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval \n RETURNING parent_flow_id, job_id, last_ping, (SELECT workspace_id FROM v2_job_queue q\n WHERE q.id = parent_flow_id AND q.running = true AND q.canceled_by IS NULL) AS workspace_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "parent_flow_id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "job_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "last_ping", + "type_info": "Timestamptz" + }, + { + "ordinal": 3, + "name": "workspace_id", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + true, + null + ] + }, + "hash": "01ece1b2ec8ec9a69e0ab21f439b3019bcc501362fda2ed35dd3037c3e8a6eb7" +} diff --git a/backend/.sqlx/query-02b06e7ffde8896c46603d830ccc540e129eb27f869431bcf8182585c8ba699c.json b/backend/.sqlx/query-02b06e7ffde8896c46603d830ccc540e129eb27f869431bcf8182585c8ba699c.json new file mode 100644 index 0000000000000..a0700fde0d74b --- /dev/null +++ b/backend/.sqlx/query-02b06e7ffde8896c46603d830ccc540e129eb27f869431bcf8182585c8ba699c.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(flow_status, ARRAY['preprocessor_module'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "02b06e7ffde8896c46603d830ccc540e129eb27f869431bcf8182585c8ba699c" +} diff --git a/backend/.sqlx/query-05d6405b2cc6aabf564a10f05402878e9f2a13e0ce0dad42723f95ac7fb15d4b.json 
b/backend/.sqlx/query-05d6405b2cc6aabf564a10f05402878e9f2a13e0ce0dad42723f95ac7fb15d4b.json new file mode 100644 index 0000000000000..195c9bfe929b2 --- /dev/null +++ b/backend/.sqlx/query-05d6405b2cc6aabf564a10f05402878e9f2a13e0ce0dad42723f95ac7fb15d4b.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT success AS \"success!\"\n FROM v2_as_completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "05d6405b2cc6aabf564a10f05402878e9f2a13e0ce0dad42723f95ac7fb15d4b" +} diff --git a/backend/.sqlx/query-099894523449a70eb301ecd1d744210d39f405016ffec374e4b15a2528baccb5.json b/backend/.sqlx/query-099894523449a70eb301ecd1d744210d39f405016ffec374e4b15a2528baccb5.json new file mode 100644 index 0000000000000..8a4a9b9c20907 --- /dev/null +++ b/backend/.sqlx/query-099894523449a70eb301ecd1d744210d39f405016ffec374e4b15a2528baccb5.json @@ -0,0 +1,198 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT schedule.*, t.jobs FROM schedule, LATERAL ( SELECT ARRAY (SELECT json_build_object('id', id, 'success', success, 'duration_ms', duration_ms) FROM v2_as_completed_job WHERE\n v2_as_completed_job.schedule_path = schedule.path AND v2_as_completed_job.workspace_id = $1 AND parent_job IS NULL AND is_skipped = False ORDER BY started_at DESC LIMIT 20) AS jobs ) t\n WHERE schedule.workspace_id = $1 ORDER BY schedule.edited_at desc LIMIT $2 OFFSET $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "path", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "edited_by", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "edited_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 4, + "name": "schedule", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": 
"enabled", + "type_info": "Bool" + }, + { + "ordinal": 6, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "args", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "extra_perms", + "type_info": "Jsonb" + }, + { + "ordinal": 9, + "name": "is_flow", + "type_info": "Bool" + }, + { + "ordinal": 10, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 11, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 12, + "name": "timezone", + "type_info": "Varchar" + }, + { + "ordinal": 13, + "name": "on_failure", + "type_info": "Varchar" + }, + { + "ordinal": 14, + "name": "on_recovery", + "type_info": "Varchar" + }, + { + "ordinal": 15, + "name": "on_failure_times", + "type_info": "Int4" + }, + { + "ordinal": 16, + "name": "on_failure_exact", + "type_info": "Bool" + }, + { + "ordinal": 17, + "name": "on_failure_extra_args", + "type_info": "Json" + }, + { + "ordinal": 18, + "name": "on_recovery_times", + "type_info": "Int4" + }, + { + "ordinal": 19, + "name": "on_recovery_extra_args", + "type_info": "Json" + }, + { + "ordinal": 20, + "name": "ws_error_handler_muted", + "type_info": "Bool" + }, + { + "ordinal": 21, + "name": "retry", + "type_info": "Jsonb" + }, + { + "ordinal": 22, + "name": "summary", + "type_info": "Varchar" + }, + { + "ordinal": 23, + "name": "no_flow_overlap", + "type_info": "Bool" + }, + { + "ordinal": 24, + "name": "tag", + "type_info": "Varchar" + }, + { + "ordinal": 25, + "name": "paused_until", + "type_info": "Timestamptz" + }, + { + "ordinal": 26, + "name": "on_success", + "type_info": "Varchar" + }, + { + "ordinal": 27, + "name": "on_success_extra_args", + "type_info": "Json" + }, + { + "ordinal": 28, + "name": "cron_version", + "type_info": "Text" + }, + { + "ordinal": 29, + "name": "jobs", + "type_info": "JsonArray" + } + ], + "parameters": { + "Left": [ + "Text", + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + 
false, + false, + false, + true, + false, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + false, + true, + true, + true, + true, + true, + null + ] + }, + "hash": "099894523449a70eb301ecd1d744210d39f405016ffec374e4b15a2528baccb5" +} diff --git a/backend/.sqlx/query-0bc1c617786bb2fdc71b85442b1d52dbd4c922436edacced18b9620c70e0cc8b.json b/backend/.sqlx/query-0bc1c617786bb2fdc71b85442b1d52dbd4c922436edacced18b9620c70e0cc8b.json new file mode 100644 index 0000000000000..104cedde3b0e2 --- /dev/null +++ b/backend/.sqlx/query-0bc1c617786bb2fdc71b85442b1d52dbd4c922436edacced18b9620c70e0cc8b.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT result #> $3 AS \"result: Json>\"\n FROM v2_job_completed WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + null + ] + }, + "hash": "0bc1c617786bb2fdc71b85442b1d52dbd4c922436edacced18b9620c70e0cc8b" +} diff --git a/backend/.sqlx/query-0c3d481e35e0b4eb72e391b4cffea9916bced29ad1b993ce950c586c11df5a1d.json b/backend/.sqlx/query-0c3d481e35e0b4eb72e391b4cffea9916bced29ad1b993ce950c586c11df5a1d.json new file mode 100644 index 0000000000000..058bf13db0ab3 --- /dev/null +++ b/backend/.sqlx/query-0c3d481e35e0b4eb72e391b4cffea9916bced29ad1b993ce950c586c11df5a1d.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_completed AS cj\n ( workspace_id\n , id\n , started_at\n , duration_ms\n , result\n , canceled_by\n , canceled_reason\n , flow_status\n , memory_peak\n , status\n )\n VALUES ($1, $2, $3, COALESCE($12::bigint, (EXTRACT('epoch' FROM (now())) - EXTRACT('epoch' FROM (COALESCE($3, now()))))*1000), $5, $7, $8, $9,$11, CASE WHEN $6::BOOL THEN 'canceled'::job_status\n WHEN $10::BOOL THEN 'skipped'::job_status\n WHEN $4::BOOL THEN 'success'::job_status\n ELSE 
'failure'::job_status END)\n ON CONFLICT (id) DO UPDATE SET status = EXCLUDED.status, result = $5 RETURNING duration_ms AS \"duration_ms!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "duration_ms!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Uuid", + "Timestamptz", + "Bool", + "Jsonb", + "Bool", + "Varchar", + "Text", + "Jsonb", + "Bool", + "Int4", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "0c3d481e35e0b4eb72e391b4cffea9916bced29ad1b993ce950c586c11df5a1d" +} diff --git a/backend/.sqlx/query-0ea5ba568ec0f62b808fe938a41174646b6bdd658b8461db1bb90a871d076718.json b/backend/.sqlx/query-0ea5ba568ec0f62b808fe938a41174646b6bdd658b8461db1bb90a871d076718.json new file mode 100644 index 0000000000000..19fe39406796e --- /dev/null +++ b/backend/.sqlx/query-0ea5ba568ec0f62b808fe938a41174646b6bdd658b8461db1bb90a871d076718.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue SET workspace_id = $1 WHERE workspace_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "0ea5ba568ec0f62b808fe938a41174646b6bdd658b8461db1bb90a871d076718" +} diff --git a/backend/.sqlx/query-0f01cbab131eebc610c0814d97814d3a73947cf4b365b972901fc5814fc31272.json b/backend/.sqlx/query-0f01cbab131eebc610c0814d97814d3a73947cf4b365b972901fc5814fc31272.json new file mode 100644 index 0000000000000..4b72369bad1ed --- /dev/null +++ b/backend/.sqlx/query-0f01cbab131eebc610c0814d97814d3a73947cf4b365b972901fc5814fc31272.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(flow_status, ARRAY['step'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "0f01cbab131eebc610c0814d97814d3a73947cf4b365b972901fc5814fc31272" +} diff --git 
a/backend/.sqlx/query-0f23535bdfe222eee5a27d52fe4fdce4ad1a487afcd62735ef25386586cfc036.json b/backend/.sqlx/query-0f23535bdfe222eee5a27d52fe4fdce4ad1a487afcd62735ef25386586cfc036.json new file mode 100644 index 0000000000000..c30c505a7b1dd --- /dev/null +++ b/backend/.sqlx/query-0f23535bdfe222eee5a27d52fe4fdce4ad1a487afcd62735ef25386586cfc036.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job_queue WHERE id = any($1) AND workspace_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray", + "Text" + ] + }, + "nullable": [] + }, + "hash": "0f23535bdfe222eee5a27d52fe4fdce4ad1a487afcd62735ef25386586cfc036" +} diff --git a/backend/.sqlx/query-11aa92258d927ff46931730df56fb98f871164756528f6ec2a90f3694e650165.json b/backend/.sqlx/query-11aa92258d927ff46931730df56fb98f871164756528f6ec2a90f3694e650165.json new file mode 100644 index 0000000000000..f5a02051b5486 --- /dev/null +++ b/backend/.sqlx/query-11aa92258d927ff46931730df56fb98f871164756528f6ec2a90f3694e650165.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = flow_status - 'approval_conditions'\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "11aa92258d927ff46931730df56fb98f871164756528f6ec2a90f3694e650165" +} diff --git a/backend/.sqlx/query-11db65c493990f6935103033b2fbb0c08ae6d91b05b2f3f7c89a990d1d5a5f8a.json b/backend/.sqlx/query-11db65c493990f6935103033b2fbb0c08ae6d91b05b2f3f7c89a990d1d5a5f8a.json new file mode 100644 index 0000000000000..1afb036030a2c --- /dev/null +++ b/backend/.sqlx/query-11db65c493990f6935103033b2fbb0c08ae6d91b05b2f3f7c89a990d1d5a5f8a.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(id) FROM v2_as_queue WHERE running = true AND email = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": 
[ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "11db65c493990f6935103033b2fbb0c08ae6d91b05b2f3f7c89a990d1d5a5f8a" +} diff --git a/backend/.sqlx/query-1260543e36028c1322c60ce028657b50b781da41cb6f952beafabd042da11fc3.json b/backend/.sqlx/query-1260543e36028c1322c60ce028657b50b781da41cb6f952beafabd042da11fc3.json new file mode 100644 index 0000000000000..d9402c9ef6cf1 --- /dev/null +++ b/backend/.sqlx/query-1260543e36028c1322c60ce028657b50b781da41cb6f952beafabd042da11fc3.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n id As \"id!\",\n flow_status->'restarted_from'->'flow_job_id' AS \"restarted_from: Json\"\n FROM v2_as_queue\n WHERE COALESCE((SELECT flow_root_job FROM v2_job WHERE id = $1), $1) = id AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "restarted_from: Json", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + null + ] + }, + "hash": "1260543e36028c1322c60ce028657b50b781da41cb6f952beafabd042da11fc3" +} diff --git a/backend/.sqlx/query-12828c9b2964f2b484a68de1e01b65cdcd277257192ee0a6d18a00f41bce49d4.json b/backend/.sqlx/query-12828c9b2964f2b484a68de1e01b65cdcd277257192ee0a6d18a00f41bce49d4.json new file mode 100644 index 0000000000000..84c0804c950c9 --- /dev/null +++ b/backend/.sqlx/query-12828c9b2964f2b484a68de1e01b65cdcd277257192ee0a6d18a00f41bce49d4.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT suspend > 0 AS \"r!\" FROM v2_job_queue WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "r!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "12828c9b2964f2b484a68de1e01b65cdcd277257192ee0a6d18a00f41bce49d4" +} diff --git a/backend/.sqlx/query-12be3655e9c916598f83ae990c18fed0c55ffee22d7eecfcc7e2b4f470448597.json 
b/backend/.sqlx/query-12be3655e9c916598f83ae990c18fed0c55ffee22d7eecfcc7e2b4f470448597.json new file mode 100644 index 0000000000000..a9c6ae8a34abf --- /dev/null +++ b/backend/.sqlx/query-12be3655e9c916598f83ae990c18fed0c55ffee22d7eecfcc7e2b4f470448597.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = JSONB_SET(\n flow_status,\n ARRAY['modules', $1::TEXT, 'iterator', 'index'],\n ((flow_status->'modules'->$1::int->'iterator'->>'index')::int + 1)::text::jsonb\n )\n WHERE id = $2\n RETURNING (flow_status->'modules'->$1::int->'iterator'->>'index')::int", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "int4", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "12be3655e9c916598f83ae990c18fed0c55ffee22d7eecfcc7e2b4f470448597" +} diff --git a/backend/.sqlx/query-1959f48940ad2870b7cc67be19f34930a56ea3de29c0f279e9480434336f4914.json b/backend/.sqlx/query-1959f48940ad2870b7cc67be19f34930a56ea3de29c0f279e9480434336f4914.json new file mode 100644 index 0000000000000..d69739d6d5b11 --- /dev/null +++ b/backend/.sqlx/query-1959f48940ad2870b7cc67be19f34930a56ea3de29c0f279e9480434336f4914.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue q SET suspend = 0\n FROM v2_job j, v2_job_flow_runtime f\n WHERE parent_job = $1\n AND f.id = j.id AND q.id = j.id\n AND suspend = $2 AND (f.flow_status->'step')::int = 0", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "1959f48940ad2870b7cc67be19f34930a56ea3de29c0f279e9480434336f4914" +} diff --git a/backend/.sqlx/query-19cc8499f682ec34d54bc4f694cb281a9bd7f5431c646c6268513751fff95395.json b/backend/.sqlx/query-19cc8499f682ec34d54bc4f694cb281a9bd7f5431c646c6268513751fff95395.json new file mode 100644 index 0000000000000..0c379a7bdb028 --- /dev/null +++ 
b/backend/.sqlx/query-19cc8499f682ec34d54bc4f694cb281a9bd7f5431c646c6268513751fff95395.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT coalesce(COUNT(*) FILTER(WHERE suspend = 0 AND running = false), 0) as \"database_length!\", coalesce(COUNT(*) FILTER(WHERE suspend > 0), 0) as \"suspended!\" FROM v2_as_queue WHERE (workspace_id = $1 OR $2) AND scheduled_for <= now()", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "database_length!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "suspended!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Bool" + ] + }, + "nullable": [ + null, + null + ] + }, + "hash": "19cc8499f682ec34d54bc4f694cb281a9bd7f5431c646c6268513751fff95395" +} diff --git a/backend/.sqlx/query-1a2ee2ac2231e60cd5b862ea7c6c5b65a2a862adafcca3769eb5a13e08d2805b.json b/backend/.sqlx/query-1a2ee2ac2231e60cd5b862ea7c6c5b65a2a862adafcca3769eb5a13e08d2805b.json new file mode 100644 index 0000000000000..aa00510d835ca --- /dev/null +++ b/backend/.sqlx/query-1a2ee2ac2231e60cd5b862ea7c6c5b65a2a862adafcca3769eb5a13e08d2805b.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_flow_runtime (id, flow_status)\n SELECT unnest($1::uuid[]), $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "1a2ee2ac2231e60cd5b862ea7c6c5b65a2a862adafcca3769eb5a13e08d2805b" +} diff --git a/backend/.sqlx/query-1a85ca0a6d0ba5ab3462907e35037136fd123f80389abe6ebbcf12084da45868.json b/backend/.sqlx/query-1a85ca0a6d0ba5ab3462907e35037136fd123f80389abe6ebbcf12084da45868.json new file mode 100644 index 0000000000000..1d83c36fd8480 --- /dev/null +++ b/backend/.sqlx/query-1a85ca0a6d0ba5ab3462907e35037136fd123f80389abe6ebbcf12084da45868.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job_queue WHERE workspace_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + 
"Text" + ] + }, + "nullable": [] + }, + "hash": "1a85ca0a6d0ba5ab3462907e35037136fd123f80389abe6ebbcf12084da45868" +} diff --git a/backend/.sqlx/query-1a9984fa378634f7f12356407831807b5e3415c28aacbf09cf7bac58bb1d8470.json b/backend/.sqlx/query-1a9984fa378634f7f12356407831807b5e3415c28aacbf09cf7bac58bb1d8470.json new file mode 100644 index 0000000000000..3f9c7a61a4685 --- /dev/null +++ b/backend/.sqlx/query-1a9984fa378634f7f12356407831807b5e3415c28aacbf09cf7bac58bb1d8470.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(id) FROM v2_job_queue WHERE running = true AND workspace_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "1a9984fa378634f7f12356407831807b5e3415c28aacbf09cf7bac58bb1d8470" +} diff --git a/backend/.sqlx/query-1af2ef2d22d344687756681bd16b40b2a9574f548e3ee60bb08926b017f6d657.json b/backend/.sqlx/query-1af2ef2d22d344687756681bd16b40b2a9574f548e3ee60bb08926b017f6d657.json new file mode 100644 index 0000000000000..d1866217a46ea --- /dev/null +++ b/backend/.sqlx/query-1af2ef2d22d344687756681bd16b40b2a9574f548e3ee60bb08926b017f6d657.json @@ -0,0 +1,92 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job (id, workspace_id, raw_code, raw_lock, raw_flow, tag, parent_job,\n created_by, permissioned_as, runnable_id, runnable_path, args, kind, trigger,\n script_lang, same_worker, pre_run_error, permissioned_as_email, visible_to_owner,\n flow_root_job, concurrent_limit, concurrency_time_window_s, timeout, flow_step_id,\n cache_ttl, priority, trigger_kind)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18,\n $19, $20, $21, $22, $23, $24, $25, $26,\n CASE WHEN $14::VARCHAR IS NOT NULL THEN 'schedule'::job_trigger_kind END)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Text", + "Text", + "Jsonb", + 
"Varchar", + "Uuid", + "Varchar", + "Varchar", + "Int8", + "Varchar", + "Jsonb", + { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + }, + "Varchar", + { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + }, + "Bool", + "Text", + "Varchar", + "Bool", + "Uuid", + "Int4", + "Int4", + "Int4", + "Varchar", + "Int4", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "1af2ef2d22d344687756681bd16b40b2a9574f548e3ee60bb08926b017f6d657" +} diff --git a/backend/.sqlx/query-1b58b90c184ca21d777ea4e264c79aecc2361134a4817c2b9580f2680425352d.json b/backend/.sqlx/query-1b58b90c184ca21d777ea4e264c79aecc2361134a4817c2b9580f2680425352d.json new file mode 100644 index 0000000000000..d0b582d17b8e8 --- /dev/null +++ b/backend/.sqlx/query-1b58b90c184ca21d777ea4e264c79aecc2361134a4817c2b9580f2680425352d.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT args AS \"args: Json>>\"\n FROM v2_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "args: Json>>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "1b58b90c184ca21d777ea4e264c79aecc2361134a4817c2b9580f2680425352d" +} diff --git a/backend/.sqlx/query-1d4718cbb0b5b66b91e8fffb6c0da6d734919a95c109822c30f4d49691b3c6b8.json b/backend/.sqlx/query-1d4718cbb0b5b66b91e8fffb6c0da6d734919a95c109822c30f4d49691b3c6b8.json new file mode 100644 
index 0000000000000..cf99751a2a049 --- /dev/null +++ b/backend/.sqlx/query-1d4718cbb0b5b66b91e8fffb6c0da6d734919a95c109822c30f4d49691b3c6b8.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_flow_runtime (id, flow_status) VALUES ($1, $2)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "1d4718cbb0b5b66b91e8fffb6c0da6d734919a95c109822c30f4d49691b3c6b8" +} diff --git a/backend/.sqlx/query-1d819b829cd92995c39d29540df8cffbcc3334bada244a331a0bd8db06029d42.json b/backend/.sqlx/query-1d819b829cd92995c39d29540df8cffbcc3334bada244a331a0bd8db06029d42.json new file mode 100644 index 0000000000000..9f40c5293d524 --- /dev/null +++ b/backend/.sqlx/query-1d819b829cd92995c39d29540df8cffbcc3334bada244a331a0bd8db06029d42.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job_completed c\n USING v2_job j\n WHERE\n created_at <= now() - ($1::bigint::text || ' s')::interval\n AND completed_at + ($1::bigint::text || ' s')::interval <= now()\n AND c.id = j.id\n RETURNING c.id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "1d819b829cd92995c39d29540df8cffbcc3334bada244a331a0bd8db06029d42" +} diff --git a/backend/.sqlx/query-1d87f41fd1abb9361d795a899120e6b77e24bf5a9044fdc5284d0d7f1e14eafa.json b/backend/.sqlx/query-1d87f41fd1abb9361d795a899120e6b77e24bf5a9044fdc5284d0d7f1e14eafa.json new file mode 100644 index 0000000000000..6a79013c0bcae --- /dev/null +++ b/backend/.sqlx/query-1d87f41fd1abb9361d795a899120e6b77e24bf5a9044fdc5284d0d7f1e14eafa.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO usage (id, is_workspace, month_, usage) \n VALUES ($1, TRUE, EXTRACT(YEAR FROM current_date) * 12 + EXTRACT(MONTH FROM current_date), $2) \n ON CONFLICT (id, is_workspace, month_) DO UPDATE SET usage = 
usage.usage + $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "1d87f41fd1abb9361d795a899120e6b77e24bf5a9044fdc5284d0d7f1e14eafa" +} diff --git a/backend/.sqlx/query-1ed4fbeafe678ff19100cb3e892662a9c3c16db9f328cf45eace40cd73c1acaa.json b/backend/.sqlx/query-1ed4fbeafe678ff19100cb3e892662a9c3c16db9f328cf45eace40cd73c1acaa.json new file mode 100644 index 0000000000000..13054ff82dea7 --- /dev/null +++ b/backend/.sqlx/query-1ed4fbeafe678ff19100cb3e892662a9c3c16db9f328cf45eace40cd73c1acaa.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = jsonb_set(\n jsonb_set(flow_status, ARRAY['modules', $3::INTEGER::TEXT, 'job'], to_jsonb($1::UUID::TEXT)),\n ARRAY['modules', $3::INTEGER::TEXT, 'type'],\n to_jsonb('InProgress'::text)\n )\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "1ed4fbeafe678ff19100cb3e892662a9c3c16db9f328cf45eace40cd73c1acaa" +} diff --git a/backend/.sqlx/query-202e580eacc7907bb34b6464a66fe96ff91951fc8486d1f0f5733b8e63f043bc.json b/backend/.sqlx/query-202e580eacc7907bb34b6464a66fe96ff91951fc8486d1f0f5733b8e63f043bc.json new file mode 100644 index 0000000000000..23d9bb4cc4f96 --- /dev/null +++ b/backend/.sqlx/query-202e580eacc7907bb34b6464a66fe96ff91951fc8486d1f0f5733b8e63f043bc.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_runtime SET ping = null WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "202e580eacc7907bb34b6464a66fe96ff91951fc8486d1f0f5733b8e63f043bc" +} diff --git a/backend/.sqlx/query-205e4faf72177cae685c7f0727659d0e223b98f3ca84fdff21b1bc57c2ca3512.json b/backend/.sqlx/query-205e4faf72177cae685c7f0727659d0e223b98f3ca84fdff21b1bc57c2ca3512.json new file mode 100644 index 0000000000000..980cc6787bdde 
--- /dev/null +++ b/backend/.sqlx/query-205e4faf72177cae685c7f0727659d0e223b98f3ca84fdff21b1bc57c2ca3512.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE job_logs SET logs = '##DELETED##' WHERE job_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "205e4faf72177cae685c7f0727659d0e223b98f3ca84fdff21b1bc57c2ca3512" +} diff --git a/backend/.sqlx/query-2104c634340c9289f0130a649b793338faeba38b788245af20f815de4125fc4e.json b/backend/.sqlx/query-2104c634340c9289f0130a649b793338faeba38b788245af20f815de4125fc4e.json new file mode 100644 index 0000000000000..4cf55857d59bd --- /dev/null +++ b/backend/.sqlx/query-2104c634340c9289f0130a649b793338faeba38b788245af20f815de4125fc4e.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH suspended AS (\n UPDATE v2_job_queue SET suspend = $2, suspend_until = now() + $3\n WHERE id = $4\n RETURNING id\n ), suspended_no_ping AS (\n UPDATE v2_job_runtime SET ping = null\n WHERE id = (SELECT id FROM suspended)\n RETURNING id\n ) UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(\n flow_status,\n ARRAY['modules', flow_status->>'step'::TEXT],\n $1\n ) WHERE id = (SELECT id FROM suspended_no_ping)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Int4", + "Interval", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "2104c634340c9289f0130a649b793338faeba38b788245af20f815de4125fc4e" +} diff --git a/backend/.sqlx/query-241270e20c751806dece12fbc2de360e389da8c7f653ef8e6bc0d30c823aea51.json b/backend/.sqlx/query-241270e20c751806dece12fbc2de360e389da8c7f653ef8e6bc0d30c823aea51.json new file mode 100644 index 0000000000000..eefc3dd8e8a78 --- /dev/null +++ b/backend/.sqlx/query-241270e20c751806dece12fbc2de360e389da8c7f653ef8e6bc0d30c823aea51.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT \n flow_status->>'step' = '0' \n AND (\n jsonb_array_length(flow_status->'modules') = 0 \n OR 
flow_status->'modules'->0->>'type' = 'WaitingForPriorSteps' \n OR (\n flow_status->'modules'->0->>'type' = 'Failure' \n AND flow_status->'modules'->0->>'job' = $1\n )\n )\n FROM v2_job_completed WHERE id = $2 AND workspace_id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "241270e20c751806dece12fbc2de360e389da8c7f653ef8e6bc0d30c823aea51" +} diff --git a/backend/.sqlx/query-2456fc71fc7a0758a4c1fbe77d72fbac2fead0e1bff4e909fd7fb1a41bc35d8f.json b/backend/.sqlx/query-2456fc71fc7a0758a4c1fbe77d72fbac2fead0e1bff4e909fd7fb1a41bc35d8f.json new file mode 100644 index 0000000000000..07ae8f17c52bf --- /dev/null +++ b/backend/.sqlx/query-2456fc71fc7a0758a4c1fbe77d72fbac2fead0e1bff4e909fd7fb1a41bc35d8f.json @@ -0,0 +1,41 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n script_path, args AS \"args: sqlx::types::Json>>\",\n tag AS \"tag!\", priority\n FROM v2_as_completed_job\n WHERE id = $1 and workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "args: sqlx::types::Json>>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "tag!", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "priority", + "type_info": "Int2" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "2456fc71fc7a0758a4c1fbe77d72fbac2fead0e1bff4e909fd7fb1a41bc35d8f" +} diff --git a/backend/.sqlx/query-25d05a1e10d1aaa3f7c3c3bea5f6b2fe3f690da7bc8dfd36b47ec619c4e31995.json b/backend/.sqlx/query-25d05a1e10d1aaa3f7c3c3bea5f6b2fe3f690da7bc8dfd36b47ec619c4e31995.json new file mode 100644 index 0000000000000..eb652ef49d823 --- /dev/null +++ b/backend/.sqlx/query-25d05a1e10d1aaa3f7c3c3bea5f6b2fe3f690da7bc8dfd36b47ec619c4e31995.json @@ -0,0 +1,54 @@ 
+{ + "db_name": "PostgreSQL", + "query": "SELECT\n running AS \"running!\",\n substr(concat(coalesce(v2_as_queue.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs,\n mem_peak,\n CASE WHEN is_flow_step is true then NULL else flow_status END AS \"flow_status: sqlx::types::Json>\",\n job_logs.log_offset + char_length(job_logs.logs) + 1 AS log_offset,\n created_by AS \"created_by!\"\n FROM v2_as_queue\n LEFT JOIN job_logs ON job_logs.job_id = v2_as_queue.id \n WHERE v2_as_queue.workspace_id = $2 AND v2_as_queue.id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "running!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "mem_peak", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Int4", + "Text", + "Uuid" + ] + }, + "nullable": [ + true, + null, + true, + null, + null, + true + ] + }, + "hash": "25d05a1e10d1aaa3f7c3c3bea5f6b2fe3f690da7bc8dfd36b47ec619c4e31995" +} diff --git a/backend/.sqlx/query-26bee60d5311b1454d2d14ddf7b030eb195900929a10632c36f7b03b6bb212a4.json b/backend/.sqlx/query-26bee60d5311b1454d2d14ddf7b030eb195900929a10632c36f7b03b6bb212a4.json new file mode 100644 index 0000000000000..c47f88ff1abdf --- /dev/null +++ b/backend/.sqlx/query-26bee60d5311b1454d2d14ddf7b030eb195900929a10632c36f7b03b6bb212a4.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET leaf_jobs = JSONB_SET(coalesce(leaf_jobs, '{}'::jsonb), ARRAY[$1::TEXT], $2)\n WHERE COALESCE((SELECT flow_root_job FROM v2_job WHERE id = $3), $3) = id", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": 
"26bee60d5311b1454d2d14ddf7b030eb195900929a10632c36f7b03b6bb212a4" +} diff --git a/backend/.sqlx/query-280a361076d1c6317610765960f543252891c53351bdc98da66cc30ffc895866.json b/backend/.sqlx/query-280a361076d1c6317610765960f543252891c53351bdc98da66cc30ffc895866.json new file mode 100644 index 0000000000000..e9705c23a6342 --- /dev/null +++ b/backend/.sqlx/query-280a361076d1c6317610765960f543252891c53351bdc98da66cc30ffc895866.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT script_path FROM v2_as_completed_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "script_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "280a361076d1c6317610765960f543252891c53351bdc98da66cc30ffc895866" +} diff --git a/backend/.sqlx/query-28c40cfbe796c6fb942cc9f72e00f9d3de633cdc4f9e23c6d9bd27eba07b0cb2.json b/backend/.sqlx/query-28c40cfbe796c6fb942cc9f72e00f9d3de633cdc4f9e23c6d9bd27eba07b0cb2.json new file mode 100644 index 0000000000000..b39b9ef58aec1 --- /dev/null +++ b/backend/.sqlx/query-28c40cfbe796c6fb942cc9f72e00f9d3de633cdc4f9e23c6d9bd27eba07b0cb2.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT null FROM v2_job_queue WHERE id = $1 FOR UPDATE", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "28c40cfbe796c6fb942cc9f72e00f9d3de633cdc4f9e23c6d9bd27eba07b0cb2" +} diff --git a/backend/.sqlx/query-2c5a1b1ebd872ce8a03a1b7e5246c876750c85075cfda816c025c805d4c3cd4c.json b/backend/.sqlx/query-2c5a1b1ebd872ce8a03a1b7e5246c876750c85075cfda816c025c805d4c3cd4c.json new file mode 100644 index 0000000000000..6d916281eb874 --- /dev/null +++ b/backend/.sqlx/query-2c5a1b1ebd872ce8a03a1b7e5246c876750c85075cfda816c025c805d4c3cd4c.json @@ -0,0 +1,42 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by 
AS \"created_by!\", CONCAT(coalesce(v2_as_completed_job.logs, ''), coalesce(job_logs.logs, '')) as logs, job_logs.log_offset, job_logs.log_file_index\n FROM v2_as_completed_job \n LEFT JOIN job_logs ON job_logs.job_id = v2_as_completed_job.id \n WHERE v2_as_completed_job.id = $1 AND v2_as_completed_job.workspace_id = $2 AND ($3::text[] IS NULL OR v2_as_completed_job.tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "log_file_index", + "type_info": "TextArray" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + null, + false, + true + ] + }, + "hash": "2c5a1b1ebd872ce8a03a1b7e5246c876750c85075cfda816c025c805d4c3cd4c" +} diff --git a/backend/.sqlx/query-2e66f54a02c66b0e95d62c4a338b5309320e484359a8b73bc6b3b2e9bc2fd651.json b/backend/.sqlx/query-2e66f54a02c66b0e95d62c4a338b5309320e484359a8b73bc6b3b2e9bc2fd651.json new file mode 100644 index 0000000000000..6cb97954d3371 --- /dev/null +++ b/backend/.sqlx/query-2e66f54a02c66b0e95d62c4a338b5309320e484359a8b73bc6b3b2e9bc2fd651.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET flow_status = jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], jsonb_set(jsonb_set('{}'::jsonb, '{scheduled_for}', to_jsonb(now()::text)), '{name}', to_jsonb($3::text))) WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "2e66f54a02c66b0e95d62c4a338b5309320e484359a8b73bc6b3b2e9bc2fd651" +} diff --git a/backend/.sqlx/query-2f06fd0d42f41e4773b965933e80ed30e7d404befdd9d556bdc9cde3ab6f790f.json b/backend/.sqlx/query-2f06fd0d42f41e4773b965933e80ed30e7d404befdd9d556bdc9cde3ab6f790f.json new file mode 100644 index 
0000000000000..dc2e8e1894b9a --- /dev/null +++ b/backend/.sqlx/query-2f06fd0d42f41e4773b965933e80ed30e7d404befdd9d556bdc9cde3ab6f790f.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'approvers'], $2)\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "2f06fd0d42f41e4773b965933e80ed30e7d404befdd9d556bdc9cde3ab6f790f" +} diff --git a/backend/.sqlx/query-2f2ef9b1ccff527c48fa01cf1b78cd0e58c8d534ac22ec0356d82a854b31d087.json b/backend/.sqlx/query-2f2ef9b1ccff527c48fa01cf1b78cd0e58c8d534ac22ec0356d82a854b31d087.json new file mode 100644 index 0000000000000..77e16162bd600 --- /dev/null +++ b/backend/.sqlx/query-2f2ef9b1ccff527c48fa01cf1b78cd0e58c8d534ac22ec0356d82a854b31d087.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_runtime SET ping = NULL\n WHERE id = $1 AND ping = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Timestamptz" + ] + }, + "nullable": [] + }, + "hash": "2f2ef9b1ccff527c48fa01cf1b78cd0e58c8d534ac22ec0356d82a854b31d087" +} diff --git a/backend/.sqlx/query-31df83e9eb6078e93ec5fe4168306caccb849db9e0f71d86da655b01c6a3e8d0.json b/backend/.sqlx/query-31df83e9eb6078e93ec5fe4168306caccb849db9e0f71d86da655b01c6a3e8d0.json new file mode 100644 index 0000000000000..b96c05d674537 --- /dev/null +++ b/backend/.sqlx/query-31df83e9eb6078e93ec5fe4168306caccb849db9e0f71d86da655b01c6a3e8d0.json @@ -0,0 +1,27 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_queue\n (workspace_id, id, running, scheduled_for, started_at, tag, priority)\n VALUES ($1, $2, $3, COALESCE($4, now()), CASE WHEN $3 THEN now() END, $5, $6) RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Uuid", 
+ "Bool", + "Timestamptz", + "Varchar", + "Int2" + ] + }, + "nullable": [ + false + ] + }, + "hash": "31df83e9eb6078e93ec5fe4168306caccb849db9e0f71d86da655b01c6a3e8d0" +} diff --git a/backend/.sqlx/query-31f6f0712f1f53362628a487e8cd35d6d711b05d6eefffdc8e6a1b9c9bc7085d.json b/backend/.sqlx/query-31f6f0712f1f53362628a487e8cd35d6d711b05d6eefffdc8e6a1b9c9bc7085d.json new file mode 100644 index 0000000000000..cff7935a888f3 --- /dev/null +++ b/backend/.sqlx/query-31f6f0712f1f53362628a487e8cd35d6d711b05d6eefffdc8e6a1b9c9bc7085d.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT raw_code, raw_lock, raw_flow AS \"raw_flow: Json>\" FROM v2_job WHERE id = $1 LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "raw_code", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "raw_lock", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "raw_flow: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "31f6f0712f1f53362628a487e8cd35d6d711b05d6eefffdc8e6a1b9c9bc7085d" +} diff --git a/backend/.sqlx/query-33351de09c72ccc0a39eb977d26f867595813bfa1ae0b26bc4181780801294bf.json b/backend/.sqlx/query-33351de09c72ccc0a39eb977d26f867595813bfa1ae0b26bc4181780801294bf.json new file mode 100644 index 0000000000000..a3d39e0b59b74 --- /dev/null +++ b/backend/.sqlx/query-33351de09c72ccc0a39eb977d26f867595813bfa1ae0b26bc4181780801294bf.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT success AS \"success!\" FROM v2_as_completed_job WHERE id = ANY($1)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "UuidArray" + ] + }, + "nullable": [ + true + ] + }, + "hash": "33351de09c72ccc0a39eb977d26f867595813bfa1ae0b26bc4181780801294bf" +} diff --git a/backend/.sqlx/query-4244640e62fffb0f6978f8f7d78291b3294a6a7d1549d752f14acf5972552ba5.json 
b/backend/.sqlx/query-4244640e62fffb0f6978f8f7d78291b3294a6a7d1549d752f14acf5972552ba5.json new file mode 100644 index 0000000000000..841d549e20564 --- /dev/null +++ b/backend/.sqlx/query-4244640e62fffb0f6978f8f7d78291b3294a6a7d1549d752f14acf5972552ba5.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET workspace_id = $1 WHERE workspace_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "4244640e62fffb0f6978f8f7d78291b3294a6a7d1549d752f14acf5972552ba5" +} diff --git a/backend/.sqlx/query-43aa468aac174529a74e6108af55a383f8a20e98b8c502929f4dc5041a55e72f.json b/backend/.sqlx/query-43aa468aac174529a74e6108af55a383f8a20e98b8c502929f4dc5041a55e72f.json new file mode 100644 index 0000000000000..e33b0dc0b8067 --- /dev/null +++ b/backend/.sqlx/query-43aa468aac174529a74e6108af55a383f8a20e98b8c502929f4dc5041a55e72f.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS (\n SELECT 1 FROM v2_as_completed_job \n WHERE workspace_id = $2 \n AND (job_kind = 'appscript' OR job_kind = 'preview')\n AND created_by = 'anonymous' \n AND started_at > now() - interval '3 hours'\n AND script_path LIKE $3 || '/%' \n AND result @> ('{\"s3\":\"' || $1 || '\"}')::jsonb \n )", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "43aa468aac174529a74e6108af55a383f8a20e98b8c502929f4dc5041a55e72f" +} diff --git a/backend/.sqlx/query-4535c8effd1bae49894d13293a37e1ee949cf9108239032cb3addbf350fb33de.json b/backend/.sqlx/query-4535c8effd1bae49894d13293a37e1ee949cf9108239032cb3addbf350fb33de.json new file mode 100644 index 0000000000000..a500b047858f7 --- /dev/null +++ b/backend/.sqlx/query-4535c8effd1bae49894d13293a37e1ee949cf9108239032cb3addbf350fb33de.json @@ -0,0 +1,67 @@ +{ + "db_name": "PostgreSQL", + 
"query": "SELECT\n job_kind AS \"job_kind!: JobKind\",\n script_hash AS \"script_hash: ScriptHash\",\n flow_status AS \"flow_status!: Json>\",\n raw_flow AS \"raw_flow: Json>\"\n FROM v2_as_queue WHERE id = $1 AND workspace_id = $2 LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 1, + "name": "script_hash: ScriptHash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "flow_status!: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "raw_flow: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "4535c8effd1bae49894d13293a37e1ee949cf9108239032cb3addbf350fb33de" +} diff --git a/backend/.sqlx/query-45affcb4aeabf80d825628002903709851edd9db133494f2ff07e40c020f4be1.json b/backend/.sqlx/query-45affcb4aeabf80d825628002903709851edd9db133494f2ff07e40c020f4be1.json new file mode 100644 index 0000000000000..cf7c45cc6f710 --- /dev/null +++ b/backend/.sqlx/query-45affcb4aeabf80d825628002903709851edd9db133494f2ff07e40c020f4be1.json @@ -0,0 +1,75 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result AS \"result: sqlx::types::Json>\", success AS \"success!\",\n language AS \"language: ScriptLang\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n created_by AS \"created_by!\"\n FROM v2_as_completed_job\n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { 
+ "ordinal": 1, + "name": "success!", + "type_info": "Bool" + }, + { + "ordinal": 2, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true, + true, + true, + true + ] + }, + "hash": "45affcb4aeabf80d825628002903709851edd9db133494f2ff07e40c020f4be1" +} diff --git a/backend/.sqlx/query-46046b1435d1f01e6f7cafe054367d88ddbdfe81237b72d5efbc630278e44bb5.json b/backend/.sqlx/query-46046b1435d1f01e6f7cafe054367d88ddbdfe81237b72d5efbc630278e44bb5.json new file mode 100644 index 0000000000000..d56b85cd62d61 --- /dev/null +++ b/backend/.sqlx/query-46046b1435d1f01e6f7cafe054367d88ddbdfe81237b72d5efbc630278e44bb5.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_runtime r SET\n memory_peak = $1,\n ping = now()\n FROM v2_job_queue q\n WHERE r.id = $2 AND q.id = r.id\n RETURNING canceled_by IS NOT NULL AS \"canceled!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "canceled!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "46046b1435d1f01e6f7cafe054367d88ddbdfe81237b72d5efbc630278e44bb5" +} diff --git a/backend/.sqlx/query-464c51a8ea8c06232d33c45b8e59008b382c445476117a2311030734bc6d6d53.json b/backend/.sqlx/query-464c51a8ea8c06232d33c45b8e59008b382c445476117a2311030734bc6d6d53.json new file mode 100644 index 0000000000000..df664b5b8bf21 --- /dev/null +++ 
b/backend/.sqlx/query-464c51a8ea8c06232d33c45b8e59008b382c445476117a2311030734bc6d6d53.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_runtime SET ping = now() WHERE id = $1 AND ping < now()", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "464c51a8ea8c06232d33c45b8e59008b382c445476117a2311030734bc6d6d53" +} diff --git a/backend/.sqlx/query-46ac665ac5662bd89cdc92cd84d852b49429f42e394aeec9fb82c53f8bcadc54.json b/backend/.sqlx/query-46ac665ac5662bd89cdc92cd84d852b49429f42e394aeec9fb82c53f8bcadc54.json new file mode 100644 index 0000000000000..aab1277e7ead3 --- /dev/null +++ b/backend/.sqlx/query-46ac665ac5662bd89cdc92cd84d852b49429f42e394aeec9fb82c53f8bcadc54.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue q SET running = false, started_at = null\n FROM v2_job j, v2_job_runtime r\n WHERE j.id = q.id AND j.id = r.id\n AND ping < now() - ($1 || ' seconds')::interval\n AND running = true\n AND kind NOT IN ('flow', 'flowpreview', 'flownode', 'singlescriptflow')\n AND same_worker = false\n RETURNING q.id, q.workspace_id, ping", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "workspace_id", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "ping", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + true + ] + }, + "hash": "46ac665ac5662bd89cdc92cd84d852b49429f42e394aeec9fb82c53f8bcadc54" +} diff --git a/backend/.sqlx/query-47e6b25cc092ec8718a6581c76aca10b275653e10ea4aa17a8ef5091ca09294a.json b/backend/.sqlx/query-47e6b25cc092ec8718a6581c76aca10b275653e10ea4aa17a8ef5091ca09294a.json new file mode 100644 index 0000000000000..3206f22bc5963 --- /dev/null +++ b/backend/.sqlx/query-47e6b25cc092ec8718a6581c76aca10b275653e10ea4aa17a8ef5091ca09294a.json @@ -0,0 +1,29 @@ +{ + "db_name": 
"PostgreSQL", + "query": "SELECT result, id\n FROM v2_job_completed\n WHERE id = ANY($1) AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "UuidArray", + "Text" + ] + }, + "nullable": [ + true, + false + ] + }, + "hash": "47e6b25cc092ec8718a6581c76aca10b275653e10ea4aa17a8ef5091ca09294a" +} diff --git a/backend/.sqlx/query-47fb4bceddd36fa60ccbe84f6341436fb133f0b3c97abb1267a9c12326dd1a33.json b/backend/.sqlx/query-47fb4bceddd36fa60ccbe84f6341436fb133f0b3c97abb1267a9c12326dd1a33.json new file mode 100644 index 0000000000000..9e1d80c2f627e --- /dev/null +++ b/backend/.sqlx/query-47fb4bceddd36fa60ccbe84f6341436fb133f0b3c97abb1267a9c12326dd1a33.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS \"created_by!\", args as \"args: sqlx::types::Json>\"\n FROM v2_as_completed_job \n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "args: sqlx::types::Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "47fb4bceddd36fa60ccbe84f6341436fb133f0b3c97abb1267a9c12326dd1a33" +} diff --git a/backend/.sqlx/query-488dd591096b2b47787afdc3a1d73917ed13269f2ee20b86df79fca2c8efe672.json b/backend/.sqlx/query-488dd591096b2b47787afdc3a1d73917ed13269f2ee20b86df79fca2c8efe672.json new file mode 100644 index 0000000000000..8622eb9125a53 --- /dev/null +++ b/backend/.sqlx/query-488dd591096b2b47787afdc3a1d73917ed13269f2ee20b86df79fca2c8efe672.json @@ -0,0 +1,90 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH uuid_table as (\n select gen_random_uuid() as uuid from generate_series(1, $16)\n )\n INSERT INTO v2_job\n (id, 
workspace_id, raw_code, raw_lock, raw_flow, tag, runnable_id, runnable_path, kind,\n script_lang, created_by, permissioned_as, permissioned_as_email, concurrent_limit,\n concurrency_time_window_s, timeout, args)\n (SELECT uuid, $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15,\n ('{ \"uuid\": \"' || uuid || '\" }')::jsonb FROM uuid_table)\n RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text", + "Jsonb", + "Varchar", + "Int8", + "Varchar", + { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + }, + { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + }, + "Varchar", + "Varchar", + "Varchar", + "Int4", + "Int4", + "Int4", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "488dd591096b2b47787afdc3a1d73917ed13269f2ee20b86df79fca2c8efe672" +} diff --git a/backend/.sqlx/query-4a26b37d07e8ef22b34c7d94c194865c60310ef666fc1170d64df50548dddf7b.json b/backend/.sqlx/query-4a26b37d07e8ef22b34c7d94c194865c60310ef666fc1170d64df50548dddf7b.json new file mode 100644 index 0000000000000..ffd505e8f46b3 --- /dev/null +++ b/backend/.sqlx/query-4a26b37d07e8ef22b34c7d94c194865c60310ef666fc1170d64df50548dddf7b.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = JSONB_SET(\n flow_status,\n ARRAY['modules', $1::TEXT, 
'branchall', 'branch'],\n ((flow_status->'modules'->$1::int->'branchall'->>'branch')::int + 1)::text::jsonb\n )\n WHERE id = $2\n RETURNING (flow_status->'modules'->$1::int->'branchall'->>'branch')::int", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "int4", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "4a26b37d07e8ef22b34c7d94c194865c60310ef666fc1170d64df50548dddf7b" +} diff --git a/backend/.sqlx/query-4c5502cdf49a59ce6d8bdce90e1cba3a2bc575dc69f6aa01fc6df1b5d095ab41.json b/backend/.sqlx/query-4c5502cdf49a59ce6d8bdce90e1cba3a2bc575dc69f6aa01fc6df1b5d095ab41.json new file mode 100644 index 0000000000000..6651f3d2d9555 --- /dev/null +++ b/backend/.sqlx/query-4c5502cdf49a59ce6d8bdce90e1cba3a2bc575dc69f6aa01fc6df1b5d095ab41.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_completed SET result = '{}'::jsonb WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "4c5502cdf49a59ce6d8bdce90e1cba3a2bc575dc69f6aa01fc6df1b5d095ab41" +} diff --git a/backend/.sqlx/query-4d4cc1ed4857edb473dc9397ca90de3797d671a7c0395d16eec8a2f84c1fe3d1.json b/backend/.sqlx/query-4d4cc1ed4857edb473dc9397ca90de3797d671a7c0395d16eec8a2f84c1fe3d1.json new file mode 100644 index 0000000000000..1b237fbb7b339 --- /dev/null +++ b/backend/.sqlx/query-4d4cc1ed4857edb473dc9397ca90de3797d671a7c0395d16eec8a2f84c1fe3d1.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET args = '{}'::jsonb WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "4d4cc1ed4857edb473dc9397ca90de3797d671a7c0395d16eec8a2f84c1fe3d1" +} diff --git a/backend/.sqlx/query-4eb44e8ea9313646749bb73dfa6237081246a2985eec1d0d933dfedf634a7191.json b/backend/.sqlx/query-4eb44e8ea9313646749bb73dfa6237081246a2985eec1d0d933dfedf634a7191.json new 
file mode 100644 index 0000000000000..ffb6d3eee2aec --- /dev/null +++ b/backend/.sqlx/query-4eb44e8ea9313646749bb73dfa6237081246a2985eec1d0d933dfedf634a7191.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT coalesce(COUNT(*), 0) as \"database_length!\", null::bigint as suspended FROM v2_job_completed WHERE workspace_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "database_length!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "suspended", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null, + null + ] + }, + "hash": "4eb44e8ea9313646749bb73dfa6237081246a2985eec1d0d933dfedf634a7191" +} diff --git a/backend/.sqlx/query-519f4f76649947f036a2129c11e92ef0ad30e39eec59c27bae8cb0622062c8fb.json b/backend/.sqlx/query-519f4f76649947f036a2129c11e92ef0ad30e39eec59c27bae8cb0622062c8fb.json new file mode 100644 index 0000000000000..b2c0b4702c512 --- /dev/null +++ b/backend/.sqlx/query-519f4f76649947f036a2129c11e92ef0ad30e39eec59c27bae8cb0622062c8fb.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM v2_as_queue WHERE canceled = false AND (scheduled_for <= now()\n OR (suspend_until IS NOT NULL\n AND ( suspend <= 0\n OR suspend_until <= now())))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "519f4f76649947f036a2129c11e92ef0ad30e39eec59c27bae8cb0622062c8fb" +} diff --git a/backend/.sqlx/query-51ddbfec67af268d9bbee12b2730d6109d2a6633e62ce708bad8af1a9f8c3925.json b/backend/.sqlx/query-51ddbfec67af268d9bbee12b2730d6109d2a6633e62ce708bad8af1a9f8c3925.json new file mode 100644 index 0000000000000..e664011467d06 --- /dev/null +++ b/backend/.sqlx/query-51ddbfec67af268d9bbee12b2730d6109d2a6633e62ce708bad8af1a9f8c3925.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH ping AS (UPDATE v2_job_runtime SET 
ping = NULL WHERE id = $2 RETURNING id)\n UPDATE v2_job_queue SET\n running = false,\n started_at = null,\n scheduled_for = $1\n WHERE id = (SELECT id FROM ping)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Timestamptz", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "51ddbfec67af268d9bbee12b2730d6109d2a6633e62ce708bad8af1a9f8c3925" +} diff --git a/backend/.sqlx/query-55002cccf17e32af5b077a17707122cfeeeebf5a9392c6798a486274d8d233d5.json b/backend/.sqlx/query-55002cccf17e32af5b077a17707122cfeeeebf5a9392c6798a486274d8d233d5.json new file mode 100644 index 0000000000000..f9d007d4c1dd1 --- /dev/null +++ b/backend/.sqlx/query-55002cccf17e32af5b077a17707122cfeeeebf5a9392c6798a486274d8d233d5.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM v2_job WHERE id = $1 AND workspace_id = $2\n UNION ALL\n SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM input WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "args", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "Bool" + ] + }, + "nullable": [ + null + ] + }, + "hash": "55002cccf17e32af5b077a17707122cfeeeebf5a9392c6798a486274d8d233d5" +} diff --git a/backend/.sqlx/query-56d6b1f96680f6963b674c67b85d539d8fac138f409da7f712fcba0b21c572ab.json b/backend/.sqlx/query-56d6b1f96680f6963b674c67b85d539d8fac138f409da7f712fcba0b21c572ab.json new file mode 100644 index 0000000000000..0a0500532653c --- /dev/null +++ b/backend/.sqlx/query-56d6b1f96680f6963b674c67b85d539d8fac138f409da7f712fcba0b21c572ab.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(JSONB_SET(flow_status, ARRAY['retry'], $1), ARRAY['modules', $3::TEXT, 'failed_retries'], $4)\n WHERE id = $2", + "describe": { 
+ "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid", + "Text", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "56d6b1f96680f6963b674c67b85d539d8fac138f409da7f712fcba0b21c572ab" +} diff --git a/backend/.sqlx/query-572492b357bc4e2576d77ed135682f4623c74e61759dda257bd1f633f1a2b725.json b/backend/.sqlx/query-572492b357bc4e2576d77ed135682f4623c74e61759dda257bd1f633f1a2b725.json new file mode 100644 index 0000000000000..d825f28c4d94a --- /dev/null +++ b/backend/.sqlx/query-572492b357bc4e2576d77ed135682f4623c74e61759dda257bd1f633f1a2b725.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS(SELECT 1 FROM v2_job_completed WHERE id = $1 AND workspace_id = $2)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "572492b357bc4e2576d77ed135682f4623c74e61759dda257bd1f633f1a2b725" +} diff --git a/backend/.sqlx/query-58c0eb36b630d5eba9d12edca672ebd56e13193395701e26328a243055bee6b8.json b/backend/.sqlx/query-58c0eb36b630d5eba9d12edca672ebd56e13193395701e26328a243055bee6b8.json new file mode 100644 index 0000000000000..b3a47144042e4 --- /dev/null +++ b/backend/.sqlx/query-58c0eb36b630d5eba9d12edca672ebd56e13193395701e26328a243055bee6b8.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT usage.usage + 1 FROM usage \n WHERE is_workspace IS TRUE AND\n month_ = EXTRACT(YEAR FROM current_date) * 12 + EXTRACT(MONTH FROM current_date)\n AND id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "58c0eb36b630d5eba9d12edca672ebd56e13193395701e26328a243055bee6b8" +} diff --git a/backend/.sqlx/query-5962a611bf5823336340fe8d52dd7634fb3aee2af76eb9c7a2abe238159c3e22.json 
b/backend/.sqlx/query-5962a611bf5823336340fe8d52dd7634fb3aee2af76eb9c7a2abe238159c3e22.json new file mode 100644 index 0000000000000..f5f670f67ddce --- /dev/null +++ b/backend/.sqlx/query-5962a611bf5823336340fe8d52dd7634fb3aee2af76eb9c7a2abe238159c3e22.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = jsonb_set(\n jsonb_set(flow_status, ARRAY['failure_module', 'job'], to_jsonb($1::UUID::TEXT)),\n ARRAY['failure_module', 'type'],\n to_jsonb('InProgress'::text)\n )\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "5962a611bf5823336340fe8d52dd7634fb3aee2af76eb9c7a2abe238159c3e22" +} diff --git a/backend/.sqlx/query-597b148ff09a1e0f369bb04781ee4e429ebce64a4d5c16b0f136142ad213cdb1.json b/backend/.sqlx/query-597b148ff09a1e0f369bb04781ee4e429ebce64a4d5c16b0f136142ad213cdb1.json new file mode 100644 index 0000000000000..4a704e58d85be --- /dev/null +++ b/backend/.sqlx/query-597b148ff09a1e0f369bb04781ee4e429ebce64a4d5c16b0f136142ad213cdb1.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n args AS \"args: Json>>\"\n FROM v2_job\n WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "args: Json>>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "597b148ff09a1e0f369bb04781ee4e429ebce64a4d5c16b0f136142ad213cdb1" +} diff --git a/backend/.sqlx/query-59f58aed612ef6b87b9e86c8ac752c0359cc618af85b5d86ff8e36ae381d5c69.json b/backend/.sqlx/query-59f58aed612ef6b87b9e86c8ac752c0359cc618af85b5d86ff8e36ae381d5c69.json new file mode 100644 index 0000000000000..b8d137b7c244d --- /dev/null +++ b/backend/.sqlx/query-59f58aed612ef6b87b9e86c8ac752c0359cc618af85b5d86ff8e36ae381d5c69.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT result AS \"result: SqlxJson>\", success AS \"success!\"\n FROM 
v2_as_completed_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: SqlxJson>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "59f58aed612ef6b87b9e86c8ac752c0359cc618af85b5d86ff8e36ae381d5c69" +} diff --git a/backend/.sqlx/query-5bd7425430868a956cf27c442f51480681a623eba0ca97d690c50f1c4cd1613b.json b/backend/.sqlx/query-5bd7425430868a956cf27c442f51480681a623eba0ca97d690c50f1c4cd1613b.json new file mode 100644 index 0000000000000..28367951c82e3 --- /dev/null +++ b/backend/.sqlx/query-5bd7425430868a956cf27c442f51480681a623eba0ca97d690c50f1c4cd1613b.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_completed AS cj\n ( workspace_id\n , id\n , duration_ms\n , result\n , canceled_by\n , canceled_reason\n , flow_status\n , status\n , worker\n )\n SELECT q.workspace_id\n , q.id\n , 0\n , $4\n , $1\n , 'cancel all'\n , (SELECT flow_status FROM v2_job_flow_runtime WHERE id = q.id)\n , 'canceled'::job_status\n , worker\n FROM v2_job_queue q\n JOIN v2_job USING (id)\n WHERE q.id = any($2) AND running = false AND parent_job IS NULL AND q.workspace_id = $3 AND trigger IS NULL\n FOR UPDATE SKIP LOCKED\n ON CONFLICT (id) DO NOTHING RETURNING id AS \"id!\"", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "UuidArray", + "Text", + "Jsonb" + ] + }, + "nullable": [ + false + ] + }, + "hash": "5bd7425430868a956cf27c442f51480681a623eba0ca97d690c50f1c4cd1613b" +} diff --git a/backend/.sqlx/query-5d1cb6b4dccc6e26c969dce4c5ad294361808a1f8d45885ea772ae553d540f7c.json b/backend/.sqlx/query-5d1cb6b4dccc6e26c969dce4c5ad294361808a1f8d45885ea772ae553d540f7c.json new file mode 100644 index 0000000000000..d755214a83849 --- /dev/null +++ 
b/backend/.sqlx/query-5d1cb6b4dccc6e26c969dce4c5ad294361808a1f8d45885ea772ae553d540f7c.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2)\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "5d1cb6b4dccc6e26c969dce4c5ad294361808a1f8d45885ea772ae553d540f7c" +} diff --git a/backend/.sqlx/query-64bead2fca9b715d8adb23fc44e35270cf56aa9bf7af16b198659d638a4d86eb.json b/backend/.sqlx/query-64bead2fca9b715d8adb23fc44e35270cf56aa9bf7af16b198659d638a4d86eb.json new file mode 100644 index 0000000000000..1cf9a7a978dee --- /dev/null +++ b/backend/.sqlx/query-64bead2fca9b715d8adb23fc44e35270cf56aa9bf7af16b198659d638a4d86eb.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = jsonb_set(\n jsonb_set(flow_status, ARRAY['preprocessor_module', 'job'], to_jsonb($1::UUID::TEXT)),\n ARRAY['preprocessor_module', 'type'],\n to_jsonb('InProgress'::text)\n )\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "64bead2fca9b715d8adb23fc44e35270cf56aa9bf7af16b198659d638a4d86eb" +} diff --git a/backend/.sqlx/query-656b99355d9668925a848a5518168b623a2fb015e6cc1baa4e3b974a8bc458f6.json b/backend/.sqlx/query-656b99355d9668925a848a5518168b623a2fb015e6cc1baa4e3b974a8bc458f6.json new file mode 100644 index 0000000000000..ca7a34b71c089 --- /dev/null +++ b/backend/.sqlx/query-656b99355d9668925a848a5518168b623a2fb015e6cc1baa4e3b974a8bc458f6.json @@ -0,0 +1,69 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result AS \"result: sqlx::types::Json>\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n language AS \"language: ScriptLang\",\n created_by AS \"created_by!\"\n FROM v2_as_completed_job\n WHERE id = $1 AND workspace_id = $2 AND 
($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "656b99355d9668925a848a5518168b623a2fb015e6cc1baa4e3b974a8bc458f6" +} diff --git a/backend/.sqlx/query-67afe352fc26dda9107c90e50e954642d877178ce2c0e73b72c3824135ef86f4.json b/backend/.sqlx/query-67afe352fc26dda9107c90e50e954642d877178ce2c0e73b72c3824135ef86f4.json new file mode 100644 index 0000000000000..a09be9741efa3 --- /dev/null +++ b/backend/.sqlx/query-67afe352fc26dda9107c90e50e954642d877178ce2c0e73b72c3824135ef86f4.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO job_logs (job_id, logs)\n VALUES ($1, 'Restarted job after not receiving job''s ping for too long the ' || now() || '\n\n')\n ON CONFLICT (job_id) DO UPDATE SET logs = job_logs.logs || '\n' || EXCLUDED.logs\n WHERE job_logs.job_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "67afe352fc26dda9107c90e50e954642d877178ce2c0e73b72c3824135ef86f4" +} diff --git a/backend/.sqlx/query-689649d4a2d1e73e2616cbb647e0a69f911d47d7c71c6021112900f7fd560c85.json b/backend/.sqlx/query-689649d4a2d1e73e2616cbb647e0a69f911d47d7c71c6021112900f7fd560c85.json new file mode 100644 
index 0000000000000..e332f39c01afc --- /dev/null +++ b/backend/.sqlx/query-689649d4a2d1e73e2616cbb647e0a69f911d47d7c71c6021112900f7fd560c85.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT flow_root_job FROM v2_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "flow_root_job", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "689649d4a2d1e73e2616cbb647e0a69f911d47d7c71c6021112900f7fd560c85" +} diff --git a/backend/.sqlx/query-689b9b39c2e51440b428b521a60eca212d91bb79365a117d754f1d47d8f1ba23.json b/backend/.sqlx/query-689b9b39c2e51440b428b521a60eca212d91bb79365a117d754f1d47d8f1ba23.json new file mode 100644 index 0000000000000..c64d090bd18f8 --- /dev/null +++ b/backend/.sqlx/query-689b9b39c2e51440b428b521a60eca212d91bb79365a117d754f1d47d8f1ba23.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_completed SET result = '{}'::jsonb WHERE id = ANY($1)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray" + ] + }, + "nullable": [] + }, + "hash": "689b9b39c2e51440b428b521a60eca212d91bb79365a117d754f1d47d8f1ba23" +} diff --git a/backend/.sqlx/query-6ab112fa42a9ae332bfa30427b70fa742351c5c180ac3de106df54f7badb494c.json b/backend/.sqlx/query-6ab112fa42a9ae332bfa30427b70fa742351c5c180ac3de106df54f7badb494c.json new file mode 100644 index 0000000000000..ef26a8453e227 --- /dev/null +++ b/backend/.sqlx/query-6ab112fa42a9ae332bfa30427b70fa742351c5c180ac3de106df54f7badb494c.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(id) FROM v2_as_queue WHERE email = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "6ab112fa42a9ae332bfa30427b70fa742351c5c180ac3de106df54f7badb494c" +} diff --git 
a/backend/.sqlx/query-6b4a47300708e7b34a621a8a7fb9cbf0df8221a7734a0d97803e30071cc87566.json b/backend/.sqlx/query-6b4a47300708e7b34a621a8a7fb9cbf0df8221a7734a0d97803e30071cc87566.json new file mode 100644 index 0000000000000..08ecf50236ff9 --- /dev/null +++ b/backend/.sqlx/query-6b4a47300708e7b34a621a8a7fb9cbf0df8221a7734a0d97803e30071cc87566.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = JSONB_SET(\n JSONB_SET(flow_status, ARRAY['preprocessor_module'], $1),\n ARRAY['step'],\n $2\n )\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "6b4a47300708e7b34a621a8a7fb9cbf0df8221a7734a0d97803e30071cc87566" +} diff --git a/backend/.sqlx/query-6bdb3fcfe16fc40222dc7010a11026d4d4e0d381b31fe02da7d2667c0cdc1a85.json b/backend/.sqlx/query-6bdb3fcfe16fc40222dc7010a11026d4d4e0d381b31fe02da7d2667c0cdc1a85.json new file mode 100644 index 0000000000000..c498b1790a2fd --- /dev/null +++ b/backend/.sqlx/query-6bdb3fcfe16fc40222dc7010a11026d4d4e0d381b31fe02da7d2667c0cdc1a85.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_completed SET workspace_id = $1 WHERE workspace_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "6bdb3fcfe16fc40222dc7010a11026d4d4e0d381b31fe02da7d2667c0cdc1a85" +} diff --git a/backend/.sqlx/query-6d5bc1fd92fbc9a97567a697e992f3aa0e058bf353fee1b0755a798e00061ccf.json b/backend/.sqlx/query-6d5bc1fd92fbc9a97567a697e992f3aa0e058bf353fee1b0755a798e00061ccf.json new file mode 100644 index 0000000000000..6b59759743fe6 --- /dev/null +++ b/backend/.sqlx/query-6d5bc1fd92fbc9a97567a697e992f3aa0e058bf353fee1b0755a798e00061ccf.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE v2_job_flow_runtime f SET flow_status = JSONB_SET(flow_status, ARRAY['user_states'], 
JSONB_SET(COALESCE(flow_status->'user_states', '{}'::jsonb), ARRAY[$1], $2))\n FROM v2_job j\n WHERE f.id = $3 AND f.id = j.id AND j.workspace_id = $4 AND kind IN ('flow', 'flowpreview', 'flownode') RETURNING 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Uuid", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "6d5bc1fd92fbc9a97567a697e992f3aa0e058bf353fee1b0755a798e00061ccf" +} diff --git a/backend/.sqlx/query-6daf2aca3e272a1efd7b26b4c80d6b490628f502ed9f9de75cc78d0fe196f260.json b/backend/.sqlx/query-6daf2aca3e272a1efd7b26b4c80d6b490628f502ed9f9de75cc78d0fe196f260.json new file mode 100644 index 0000000000000..d94865aafa3b2 --- /dev/null +++ b/backend/.sqlx/query-6daf2aca3e272a1efd7b26b4c80d6b490628f502ed9f9de75cc78d0fe196f260.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT running AS \"running!\" FROM v2_job_queue WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "running!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "6daf2aca3e272a1efd7b26b4c80d6b490628f502ed9f9de75cc78d0fe196f260" +} diff --git a/backend/.sqlx/query-6ff7a025f529c077c1b6c9632a367aa29e2f0fdac3f1984550484d5a06a6ea21.json b/backend/.sqlx/query-6ff7a025f529c077c1b6c9632a367aa29e2f0fdac3f1984550484d5a06a6ea21.json new file mode 100644 index 0000000000000..b045ed2b0706c --- /dev/null +++ b/backend/.sqlx/query-6ff7a025f529c077c1b6c9632a367aa29e2f0fdac3f1984550484d5a06a6ea21.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_runtime r SET\n memory_peak = $1,\n ping = now()\n FROM v2_job_queue q\n WHERE r.id = $2 AND q.id = r.id\n RETURNING canceled_by, canceled_reason", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "canceled_by", + "type_info": "Varchar" + }, + { + "ordinal": 
1, + "name": "canceled_reason", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "6ff7a025f529c077c1b6c9632a367aa29e2f0fdac3f1984550484d5a06a6ea21" +} diff --git a/backend/.sqlx/query-70e740465e648d84d32a506916ed437d1e32d4b5bdc7f7fae22e804428660836.json b/backend/.sqlx/query-70e740465e648d84d32a506916ed437d1e32d4b5bdc7f7fae22e804428660836.json new file mode 100644 index 0000000000000..9278984d51e84 --- /dev/null +++ b/backend/.sqlx/query-70e740465e648d84d32a506916ed437d1e32d4b5bdc7f7fae22e804428660836.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH uuid_table as (\n select unnest($4::uuid[]) as uuid\n )\n INSERT INTO v2_job_queue\n (id, workspace_id, scheduled_for, tag)\n (SELECT uuid, $1, $2, $3 FROM uuid_table) \n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Timestamptz", + "Varchar", + "UuidArray" + ] + }, + "nullable": [ + false + ] + }, + "hash": "70e740465e648d84d32a506916ed437d1e32d4b5bdc7f7fae22e804428660836" +} diff --git a/backend/.sqlx/query-740eaa62280c83fda48f4505bbf488bee8b3e3fc8ee25a513c10b1376d69f81a.json b/backend/.sqlx/query-740eaa62280c83fda48f4505bbf488bee8b3e3fc8ee25a513c10b1376d69f81a.json new file mode 100644 index 0000000000000..417f8519732d2 --- /dev/null +++ b/backend/.sqlx/query-740eaa62280c83fda48f4505bbf488bee8b3e3fc8ee25a513c10b1376d69f81a.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(flow_status, ARRAY['retry'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "740eaa62280c83fda48f4505bbf488bee8b3e3fc8ee25a513c10b1376d69f81a" +} diff --git a/backend/.sqlx/query-7463c1eb3f690239c9277b62d935af4f372c07263c6f86bc89b0d25cb573fa80.json 
b/backend/.sqlx/query-7463c1eb3f690239c9277b62d935af4f372c07263c6f86bc89b0d25cb573fa80.json new file mode 100644 index 0000000000000..77ab2c28bdf0e --- /dev/null +++ b/backend/.sqlx/query-7463c1eb3f690239c9277b62d935af4f372c07263c6f86bc89b0d25cb573fa80.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job WHERE workspace_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [] + }, + "hash": "7463c1eb3f690239c9277b62d935af4f372c07263c6f86bc89b0d25cb573fa80" +} diff --git a/backend/.sqlx/query-74dbd5a09255c30991078492ba3850e02ffbef25fbfd29cbedc041b0e439e580.json b/backend/.sqlx/query-74dbd5a09255c30991078492ba3850e02ffbef25fbfd29cbedc041b0e439e580.json new file mode 100644 index 0000000000000..77515a838f083 --- /dev/null +++ b/backend/.sqlx/query-74dbd5a09255c30991078492ba3850e02ffbef25fbfd29cbedc041b0e439e580.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\", flow_status AS \"flow_status!: Json\"\n FROM v2_as_completed_job\n WHERE parent_job = $1 AND workspace_id = $2 AND flow_status IS NOT NULL", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status!: Json", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "74dbd5a09255c30991078492ba3850e02ffbef25fbfd29cbedc041b0e439e580" +} diff --git a/backend/.sqlx/query-78e6e2bd3e009055812202e763294304872cf2136b15076f6712a77d12a648f1.json b/backend/.sqlx/query-78e6e2bd3e009055812202e763294304872cf2136b15076f6712a77d12a648f1.json new file mode 100644 index 0000000000000..04e381a0cf445 --- /dev/null +++ b/backend/.sqlx/query-78e6e2bd3e009055812202e763294304872cf2136b15076f6712a77d12a648f1.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue SET\n running = false,\n started_at = null\n WHERE id = $1 AND canceled_by 
IS NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "78e6e2bd3e009055812202e763294304872cf2136b15076f6712a77d12a648f1" +} diff --git a/backend/.sqlx/query-798a6bdc8f2fb9421808e084fbddf7de61fa9953998e3dc66af0e5f7491a65f4.json b/backend/.sqlx/query-798a6bdc8f2fb9421808e084fbddf7de61fa9953998e3dc66af0e5f7491a65f4.json new file mode 100644 index 0000000000000..15d39aa5396f7 --- /dev/null +++ b/backend/.sqlx/query-798a6bdc8f2fb9421808e084fbddf7de61fa9953998e3dc66af0e5f7491a65f4.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue SET suspend = 0 WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "798a6bdc8f2fb9421808e084fbddf7de61fa9953998e3dc66af0e5f7491a65f4" +} diff --git a/backend/.sqlx/query-7af1cf089022fc1c3597b270b69aa669a153ab0c0bb2807cd4f7fd405afa6f69.json b/backend/.sqlx/query-7af1cf089022fc1c3597b270b69aa669a153ab0c0bb2807cd4f7fd405afa6f69.json new file mode 100644 index 0000000000000..d2e54f45c89ae --- /dev/null +++ b/backend/.sqlx/query-7af1cf089022fc1c3597b270b69aa669a153ab0c0bb2807cd4f7fd405afa6f69.json @@ -0,0 +1,73 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n script_path, script_hash AS \"script_hash: ScriptHash\",\n job_kind AS \"job_kind!: JobKind\",\n flow_status AS \"flow_status: Json>\",\n raw_flow AS \"raw_flow: Json>\"\n FROM v2_as_completed_job WHERE id = $1 and workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "script_hash: ScriptHash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + 
"noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "flow_status: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "raw_flow: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true + ] + }, + "hash": "7af1cf089022fc1c3597b270b69aa669a153ab0c0bb2807cd4f7fd405afa6f69" +} diff --git a/backend/.sqlx/query-7b1e6b67a20ae1128118d5f5cc0db4007fb9dc6fd20582a46ebb951fca3a7abd.json b/backend/.sqlx/query-7b1e6b67a20ae1128118d5f5cc0db4007fb9dc6fd20582a46ebb951fca3a7abd.json new file mode 100644 index 0000000000000..a094b8580fd14 --- /dev/null +++ b/backend/.sqlx/query-7b1e6b67a20ae1128118d5f5cc0db4007fb9dc6fd20582a46ebb951fca3a7abd.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT CAST(ROUND(AVG(duration_ms), 0) AS BIGINT) AS avg_duration_s FROM\n (SELECT duration_ms FROM concurrency_key LEFT JOIN v2_as_completed_job ON v2_as_completed_job.id = concurrency_key.job_id WHERE key = $1 AND ended_at IS NOT NULL\n ORDER BY ended_at\n DESC LIMIT 10) AS t", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "avg_duration_s", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "7b1e6b67a20ae1128118d5f5cc0db4007fb9dc6fd20582a46ebb951fca3a7abd" +} diff --git a/backend/.sqlx/query-7c7234d8234fbaff4d42eb47d1e1ea1747ffe28986055c01a4661628e47c0e51.json b/backend/.sqlx/query-7c7234d8234fbaff4d42eb47d1e1ea1747ffe28986055c01a4661628e47c0e51.json new file mode 100644 index 0000000000000..b1ace4f3154eb --- /dev/null +++ b/backend/.sqlx/query-7c7234d8234fbaff4d42eb47d1e1ea1747ffe28986055c01a4661628e47c0e51.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_completed SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", + "describe": 
{ + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "7c7234d8234fbaff4d42eb47d1e1ea1747ffe28986055c01a4661628e47c0e51" +} diff --git a/backend/.sqlx/query-7cb0b49f5898b1adbf6579922aa02be4b9550b043240b2076e105976fedb789c.json b/backend/.sqlx/query-7cb0b49f5898b1adbf6579922aa02be4b9550b043240b2076e105976fedb789c.json new file mode 100644 index 0000000000000..3e507fe815c6b --- /dev/null +++ b/backend/.sqlx/query-7cb0b49f5898b1adbf6579922aa02be4b9550b043240b2076e105976fedb789c.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue SET canceled_by = $1, canceled_reason = $2, scheduled_for = now(), suspend = 0 WHERE id = $3 AND workspace_id = $4 AND (canceled_by IS NULL OR canceled_reason != $2) RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "7cb0b49f5898b1adbf6579922aa02be4b9550b043240b2076e105976fedb789c" +} diff --git a/backend/.sqlx/query-7cc627bc2823d3124c4d20c1c0b277948c1f579ef0f6419a113f38ee71048077.json b/backend/.sqlx/query-7cc627bc2823d3124c4d20c1c0b277948c1f579ef0f6419a113f38ee71048077.json new file mode 100644 index 0000000000000..58edb9cc3aa7c --- /dev/null +++ b/backend/.sqlx/query-7cc627bc2823d3124c4d20c1c0b277948c1f579ef0f6419a113f38ee71048077.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT (flow_status->'step')::integer as step, jsonb_array_length(flow_status->'modules') as len\n FROM v2_job_flow_runtime WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "step", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "len", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null, + null + ] + }, + "hash": "7cc627bc2823d3124c4d20c1c0b277948c1f579ef0f6419a113f38ee71048077" +} 
diff --git a/backend/.sqlx/query-7d07a717533bfcaf581f6655bc387095542490fbb4aae30ec7fa75c2dae98ec8.json b/backend/.sqlx/query-7d07a717533bfcaf581f6655bc387095542490fbb4aae30ec7fa75c2dae98ec8.json new file mode 100644 index 0000000000000..4e90a2a0ec9a1 --- /dev/null +++ b/backend/.sqlx/query-7d07a717533bfcaf581f6655bc387095542490fbb4aae30ec7fa75c2dae98ec8.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_runtime r SET\n ping = now()\n FROM v2_job_queue q\n WHERE r.id = $1 AND q.id = r.id\n AND q.workspace_id = $2\n AND canceled_by IS NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "7d07a717533bfcaf581f6655bc387095542490fbb4aae30ec7fa75c2dae98ec8" +} diff --git a/backend/.sqlx/query-8123ba05f6e7b9bd395175ee4ec0c36c3726b2da9ff3592589ac7e83df1c537c.json b/backend/.sqlx/query-8123ba05f6e7b9bd395175ee4ec0c36c3726b2da9ff3592589ac7e83df1c537c.json new file mode 100644 index 0000000000000..1126431b462b6 --- /dev/null +++ b/backend/.sqlx/query-8123ba05f6e7b9bd395175ee4ec0c36c3726b2da9ff3592589ac7e83df1c537c.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, flow_status AS \"flow_status!: Json\"\n FROM v2_job_completed WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status!: Json", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "8123ba05f6e7b9bd395175ee4ec0c36c3726b2da9ff3592589ac7e83df1c537c" +} diff --git a/backend/.sqlx/query-830297547ea33969f96a5c4c2b82a540f1d147948e1b1a7523b21151ffa22305.json b/backend/.sqlx/query-830297547ea33969f96a5c4c2b82a540f1d147948e1b1a7523b21151ffa22305.json new file mode 100644 index 0000000000000..2e67b21966708 --- /dev/null +++ 
b/backend/.sqlx/query-830297547ea33969f96a5c4c2b82a540f1d147948e1b1a7523b21151ffa22305.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n success AS \"success!\",\n result AS \"result: Json>\",\n started_at AS \"started_at!\"FROM v2_as_completed_job WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4\n ORDER BY created_at DESC\n LIMIT $5", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "result: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "started_at!", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Uuid", + "Int8" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "830297547ea33969f96a5c4c2b82a540f1d147948e1b1a7523b21151ffa22305" +} diff --git a/backend/.sqlx/query-8381cee8bec06f8eff847b22d25574fc2cbcb32fef9f44632a8bc4c5c487fc4f.json b/backend/.sqlx/query-8381cee8bec06f8eff847b22d25574fc2cbcb32fef9f44632a8bc4c5c487fc4f.json new file mode 100644 index 0000000000000..0a5c7ae2c85ce --- /dev/null +++ b/backend/.sqlx/query-8381cee8bec06f8eff847b22d25574fc2cbcb32fef9f44632a8bc4c5c487fc4f.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM v2_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "args", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "Bool" + ] + }, + "nullable": [ + null + ] + }, + "hash": "8381cee8bec06f8eff847b22d25574fc2cbcb32fef9f44632a8bc4c5c487fc4f" +} diff --git a/backend/.sqlx/query-8440081df15fa7874dded86b8af572d971d5892b7dd4c8e824b953113bd6c4a9.json b/backend/.sqlx/query-8440081df15fa7874dded86b8af572d971d5892b7dd4c8e824b953113bd6c4a9.json new file mode 100644 index 0000000000000..4a7ebbc0f7142 --- /dev/null +++ 
b/backend/.sqlx/query-8440081df15fa7874dded86b8af572d971d5892b7dd4c8e824b953113bd6c4a9.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT parent_job\n FROM v2_job\n WHERE id = $1 AND workspace_id = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "parent_job", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "8440081df15fa7874dded86b8af572d971d5892b7dd4c8e824b953113bd6c4a9" +} diff --git a/backend/.sqlx/query-84ad6f24aa9ccdc6f6e705b7de1bb707637a9bb1475711f9f9f3f2a6e28cb169.json b/backend/.sqlx/query-84ad6f24aa9ccdc6f6e705b7de1bb707637a9bb1475711f9f9f3f2a6e28cb169.json new file mode 100644 index 0000000000000..510e47e50c6fa --- /dev/null +++ b/backend/.sqlx/query-84ad6f24aa9ccdc6f6e705b7de1bb707637a9bb1475711f9f9f3f2a6e28cb169.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue SET suspend = $1 WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "84ad6f24aa9ccdc6f6e705b7de1bb707637a9bb1475711f9f9f3f2a6e28cb169" +} diff --git a/backend/.sqlx/query-8512298656d6872a2123c2801d842119537374a815c1292fac4c5b75bdd58898.json b/backend/.sqlx/query-8512298656d6872a2123c2801d842119537374a815c1292fac4c5b75bdd58898.json new file mode 100644 index 0000000000000..8390708116782 --- /dev/null +++ b/backend/.sqlx/query-8512298656d6872a2123c2801d842119537374a815c1292fac4c5b75bdd58898.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET runnable_path = REGEXP_REPLACE(runnable_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\\1') WHERE runnable_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "8512298656d6872a2123c2801d842119537374a815c1292fac4c5b75bdd58898" +} diff --git 
a/backend/.sqlx/query-85705fc3d7f8ba5f1b12d5fb222c38fc64deb1226aab9dc3bc4465324fce37d1.json b/backend/.sqlx/query-85705fc3d7f8ba5f1b12d5fb222c38fc64deb1226aab9dc3bc4465324fce37d1.json new file mode 100644 index 0000000000000..bce7324fb64cb --- /dev/null +++ b/backend/.sqlx/query-85705fc3d7f8ba5f1b12d5fb222c38fc64deb1226aab9dc3bc4465324fce37d1.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_queue (id, workspace_id, scheduled_for, tag) SELECT unnest($1::uuid[]), $2, now(), $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray", + "Varchar", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "85705fc3d7f8ba5f1b12d5fb222c38fc64deb1226aab9dc3bc4465324fce37d1" +} diff --git a/backend/.sqlx/query-86446941173c8fd5c672f9a0218d85b7b8667c1ae76c4c7bd2db52dc5a8c7778.json b/backend/.sqlx/query-86446941173c8fd5c672f9a0218d85b7b8667c1ae76c4c7bd2db52dc5a8c7778.json new file mode 100644 index 0000000000000..c50ebc4504f2a --- /dev/null +++ b/backend/.sqlx/query-86446941173c8fd5c672f9a0218d85b7b8667c1ae76c4c7bd2db52dc5a8c7778.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(flow_status, ARRAY['failure_module'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "86446941173c8fd5c672f9a0218d85b7b8667c1ae76c4c7bd2db52dc5a8c7778" +} diff --git a/backend/.sqlx/query-86cc1e3c18e936a700d8842a51a6f8377ec190669e2e22f8d511871d6fbe07b8.json b/backend/.sqlx/query-86cc1e3c18e936a700d8842a51a6f8377ec190669e2e22f8d511871d6fbe07b8.json new file mode 100644 index 0000000000000..47b30f28954a3 --- /dev/null +++ b/backend/.sqlx/query-86cc1e3c18e936a700d8842a51a6f8377ec190669e2e22f8d511871d6fbe07b8.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n success AS \"success!\",\n result AS \"result: Json>\",\n started_at AS \"started_at!\"\n FROM 
v2_as_completed_job\n WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4\n ORDER BY created_at DESC\n LIMIT $5", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "success!", + "type_info": "Bool" + }, + { + "ordinal": 1, + "name": "result: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "started_at!", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Uuid", + "Int8" + ] + }, + "nullable": [ + true, + true, + true + ] + }, + "hash": "86cc1e3c18e936a700d8842a51a6f8377ec190669e2e22f8d511871d6fbe07b8" +} diff --git a/backend/.sqlx/query-86e730a3481868a8d63824904df42788dd8a777d643d46d5086b5f8f33bbc97e.json b/backend/.sqlx/query-86e730a3481868a8d63824904df42788dd8a777d643d46d5086b5f8f33bbc97e.json new file mode 100644 index 0000000000000..70406daddded0 --- /dev/null +++ b/backend/.sqlx/query-86e730a3481868a8d63824904df42788dd8a777d643d46d5086b5f8f33bbc97e.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "VACUUM (skip_locked) v2_job_queue, v2_job_runtime, v2_job_flow_runtime", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "86e730a3481868a8d63824904df42788dd8a777d643d46d5086b5f8f33bbc97e" +} diff --git a/backend/.sqlx/query-87d1daaa2c2946b1be0ca4509d9746692c1112f524727fcc0d56479adca15011.json b/backend/.sqlx/query-87d1daaa2c2946b1be0ca4509d9746692c1112f524727fcc0d56479adca15011.json new file mode 100644 index 0000000000000..0fb2d003af5ca --- /dev/null +++ b/backend/.sqlx/query-87d1daaa2c2946b1be0ca4509d9746692c1112f524727fcc0d56479adca15011.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH suspend AS (\n UPDATE v2_job_queue SET suspend = $2, suspend_until = now() + $3\n WHERE id = $4\n RETURNING id\n ) UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(\n flow_status,\n ARRAY['modules', flow_status->>'step'::TEXT],\n $1\n ) WHERE id = (SELECT id FROM suspend)", + 
"describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Int4", + "Interval", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "87d1daaa2c2946b1be0ca4509d9746692c1112f524727fcc0d56479adca15011" +} diff --git a/backend/.sqlx/query-902a16d9e7ac34e7f1a0ad633bae754a0d64bc934a2b6434b9cb67f28d3ef950.json b/backend/.sqlx/query-902a16d9e7ac34e7f1a0ad633bae754a0d64bc934a2b6434b9cb67f28d3ef950.json new file mode 100644 index 0000000000000..7a6d47ba81157 --- /dev/null +++ b/backend/.sqlx/query-902a16d9e7ac34e7f1a0ad633bae754a0d64bc934a2b6434b9cb67f28d3ef950.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "902a16d9e7ac34e7f1a0ad633bae754a0d64bc934a2b6434b9cb67f28d3ef950" +} diff --git a/backend/.sqlx/query-90fbb9430ab03ce3aadd95cc263e5a3d1a91ea02de7608676575e1c03023ed71.json b/backend/.sqlx/query-90fbb9430ab03ce3aadd95cc263e5a3d1a91ea02de7608676575e1c03023ed71.json new file mode 100644 index 0000000000000..61598c0b2dfe8 --- /dev/null +++ b/backend/.sqlx/query-90fbb9430ab03ce3aadd95cc263e5a3d1a91ea02de7608676575e1c03023ed71.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT tag as \"tag!\", COUNT(*) as \"count!\"\n FROM v2_as_completed_job\n WHERE started_at > NOW() - make_interval(secs => $1) AND ($2::text IS NULL OR workspace_id = $2)\n GROUP BY tag\n ORDER BY \"count!\" DESC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tag!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Float8", + "Text" + ] + }, + "nullable": [ + true, + null + ] + }, + "hash": "90fbb9430ab03ce3aadd95cc263e5a3d1a91ea02de7608676575e1c03023ed71" +} diff --git 
a/backend/.sqlx/query-94831baa639d7546f98f24847c0f93697ee1edcee0acf4a0684a28ff66ef735a.json b/backend/.sqlx/query-94831baa639d7546f98f24847c0f93697ee1edcee0acf4a0684a28ff66ef735a.json new file mode 100644 index 0000000000000..01a61edd5ff8d --- /dev/null +++ b/backend/.sqlx/query-94831baa639d7546f98f24847c0f93697ee1edcee0acf4a0684a28ff66ef735a.json @@ -0,0 +1,48 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n substr(concat(coalesce(v2_as_completed_job.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs,\n mem_peak,\n CASE WHEN is_flow_step is true then NULL else flow_status END AS \"flow_status: sqlx::types::Json>\",\n job_logs.log_offset + char_length(job_logs.logs) + 1 AS log_offset,\n created_by AS \"created_by!\"\n FROM v2_as_completed_job\n LEFT JOIN job_logs ON job_logs.job_id = v2_as_completed_job.id \n WHERE v2_as_completed_job.workspace_id = $2 AND v2_as_completed_job.id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 1, + "name": "mem_peak", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Int4", + "Text", + "Uuid" + ] + }, + "nullable": [ + null, + true, + null, + null, + true + ] + }, + "hash": "94831baa639d7546f98f24847c0f93697ee1edcee0acf4a0684a28ff66ef735a" +} diff --git a/backend/.sqlx/query-95ae90094ec0e2c22660cc2e3788b22231dab9c558723cc54894597ce4cd3d5a.json b/backend/.sqlx/query-95ae90094ec0e2c22660cc2e3788b22231dab9c558723cc54894597ce4cd3d5a.json new file mode 100644 index 0000000000000..d451727f46d96 --- /dev/null +++ b/backend/.sqlx/query-95ae90094ec0e2c22660cc2e3788b22231dab9c558723cc54894597ce4cd3d5a.json @@ -0,0 +1,42 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS 
\"created_by!\", CONCAT(coalesce(v2_as_queue.logs, ''), coalesce(job_logs.logs, '')) as logs, coalesce(job_logs.log_offset, 0) as log_offset, job_logs.log_file_index\n FROM v2_as_queue \n LEFT JOIN job_logs ON job_logs.job_id = v2_as_queue.id \n WHERE v2_as_queue.id = $1 AND v2_as_queue.workspace_id = $2 AND ($3::text[] IS NULL OR v2_as_queue.tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "logs", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "log_offset", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "log_file_index", + "type_info": "TextArray" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + true, + null, + null, + true + ] + }, + "hash": "95ae90094ec0e2c22660cc2e3788b22231dab9c558723cc54894597ce4cd3d5a" +} diff --git a/backend/.sqlx/query-992ebe3a73d0bd3b2122c08ca41b2d2deae208d7e95dd7d39884b7058a0512ff.json b/backend/.sqlx/query-992ebe3a73d0bd3b2122c08ca41b2d2deae208d7e95dd7d39884b7058a0512ff.json new file mode 100644 index 0000000000000..16e45505b8419 --- /dev/null +++ b/backend/.sqlx/query-992ebe3a73d0bd3b2122c08ca41b2d2deae208d7e95dd7d39884b7058a0512ff.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_flow_runtime (id, flow_status) SELECT unnest($1::uuid[]), $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "992ebe3a73d0bd3b2122c08ca41b2d2deae208d7e95dd7d39884b7058a0512ff" +} diff --git a/backend/.sqlx/query-9b2c42327378963b16eec5037537b8d9ae95c194b201bd1ce1d8f90925d7dba8.json b/backend/.sqlx/query-9b2c42327378963b16eec5037537b8d9ae95c194b201bd1ce1d8f90925d7dba8.json new file mode 100644 index 0000000000000..e73b49d4b0a6c --- /dev/null +++ b/backend/.sqlx/query-9b2c42327378963b16eec5037537b8d9ae95c194b201bd1ce1d8f90925d7dba8.json @@ -0,0 +1,16 @@ +{ + "db_name": 
"PostgreSQL", + "query": "UPDATE v2_job SET permissioned_as = ('u/' || $1) WHERE permissioned_as = ('u/' || $2) AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "9b2c42327378963b16eec5037537b8d9ae95c194b201bd1ce1d8f90925d7dba8" +} diff --git a/backend/.sqlx/query-9c19ad9ab14325587d662539c04e18e8dfbdb0bf1dd4c0dc07a55f4eeb4eb5f8.json b/backend/.sqlx/query-9c19ad9ab14325587d662539c04e18e8dfbdb0bf1dd4c0dc07a55f4eeb4eb5f8.json new file mode 100644 index 0000000000000..89368ea0c849e --- /dev/null +++ b/backend/.sqlx/query-9c19ad9ab14325587d662539c04e18e8dfbdb0bf1dd4c0dc07a55f4eeb4eb5f8.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(*) FROM v2_as_queue LEFT JOIN concurrency_key ON concurrency_key.job_id = v2_as_queue.id\n WHERE key = $1 AND running = false AND canceled = false AND scheduled_for >= $2 AND scheduled_for < $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Timestamptz", + "Timestamptz" + ] + }, + "nullable": [ + null + ] + }, + "hash": "9c19ad9ab14325587d662539c04e18e8dfbdb0bf1dd4c0dc07a55f4eeb4eb5f8" +} diff --git a/backend/.sqlx/query-9c7b2d9708a65764d91bf7f8a2cb9e59aba53d8e17f1d8a887128b7863c3924d.json b/backend/.sqlx/query-9c7b2d9708a65764d91bf7f8a2cb9e59aba53d8e17f1d8a887128b7863c3924d.json new file mode 100644 index 0000000000000..b9429cbab7bff --- /dev/null +++ b/backend/.sqlx/query-9c7b2d9708a65764d91bf7f8a2cb9e59aba53d8e17f1d8a887128b7863c3924d.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET args = '{}'::jsonb WHERE id = ANY($1)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray" + ] + }, + "nullable": [] + }, + "hash": "9c7b2d9708a65764d91bf7f8a2cb9e59aba53d8e17f1d8a887128b7863c3924d" +} diff --git 
a/backend/.sqlx/query-9dd243640439ffc1451cf2bfeb2492e706159b2bc00da9758cccd3c4bcf2a918.json b/backend/.sqlx/query-9dd243640439ffc1451cf2bfeb2492e706159b2bc00da9758cccd3c4bcf2a918.json new file mode 100644 index 0000000000000..1fe35f002e089 --- /dev/null +++ b/backend/.sqlx/query-9dd243640439ffc1451cf2bfeb2492e706159b2bc00da9758cccd3c4bcf2a918.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = flow_status - 'retry'\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "9dd243640439ffc1451cf2bfeb2492e706159b2bc00da9758cccd3c4bcf2a918" +} diff --git a/backend/.sqlx/query-9e7e6fe1dfba032e586f64531e12529d57faf5e77f6792d5bda608ff9658d7c9.json b/backend/.sqlx/query-9e7e6fe1dfba032e586f64531e12529d57faf5e77f6792d5bda608ff9658d7c9.json new file mode 100644 index 0000000000000..2ee7a1acefdea --- /dev/null +++ b/backend/.sqlx/query-9e7e6fe1dfba032e586f64531e12529d57faf5e77f6792d5bda608ff9658d7c9.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS (SELECT 1 FROM v2_as_queue WHERE workspace_id = $1 AND schedule_path = $2 AND scheduled_for = $3)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Timestamptz" + ] + }, + "nullable": [ + null + ] + }, + "hash": "9e7e6fe1dfba032e586f64531e12529d57faf5e77f6792d5bda608ff9658d7c9" +} diff --git a/backend/.sqlx/query-9f38007b605f51615f28aa248af5188d40b02b73c12a7905d59edaefc30e8872.json b/backend/.sqlx/query-9f38007b605f51615f28aa248af5188d40b02b73c12a7905d59edaefc30e8872.json new file mode 100644 index 0000000000000..4a5e9349b89d9 --- /dev/null +++ b/backend/.sqlx/query-9f38007b605f51615f28aa248af5188d40b02b73c12a7905d59edaefc30e8872.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(flow_status, 
ARRAY['modules', flow_status->>'step', 'progress'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "9f38007b605f51615f28aa248af5188d40b02b73c12a7905d59edaefc30e8872" +} diff --git a/backend/.sqlx/query-9fcceac37b2f3407357d495c0e1b534f885da45c18866731676d5b797d385ce1.json b/backend/.sqlx/query-9fcceac37b2f3407357d495c0e1b534f885da45c18866731676d5b797d385ce1.json new file mode 100644 index 0000000000000..2e345510d4e68 --- /dev/null +++ b/backend/.sqlx/query-9fcceac37b2f3407357d495c0e1b534f885da45c18866731676d5b797d385ce1.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM v2_job WHERE workspace_id = $1 and flow_root_job = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid" + ] + }, + "nullable": [ + false + ] + }, + "hash": "9fcceac37b2f3407357d495c0e1b534f885da45c18866731676d5b797d385ce1" +} diff --git a/backend/.sqlx/query-a0c35cb515a842067b294343c90f1bfbe4e2db85da9a478a07460733999e9beb.json b/backend/.sqlx/query-a0c35cb515a842067b294343c90f1bfbe4e2db85da9a478a07460733999e9beb.json new file mode 100644 index 0000000000000..298a66c89ccb5 --- /dev/null +++ b/backend/.sqlx/query-a0c35cb515a842067b294343c90f1bfbe4e2db85da9a478a07460733999e9beb.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT count(*) AS \"count!\" FROM resume_job", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "a0c35cb515a842067b294343c90f1bfbe4e2db85da9a478a07460733999e9beb" +} diff --git a/backend/.sqlx/query-a10e465cb27f7e29d921ea814dd676407f78dcf26f38c93e6097854561e5e2c0.json b/backend/.sqlx/query-a10e465cb27f7e29d921ea814dd676407f78dcf26f38c93e6097854561e5e2c0.json new file mode 100644 index 0000000000000..f1cb953ce5457 
--- /dev/null +++ b/backend/.sqlx/query-a10e465cb27f7e29d921ea814dd676407f78dcf26f38c93e6097854561e5e2c0.json @@ -0,0 +1,68 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result AS \"result: sqlx::types::Json>\",\n language AS \"language: ScriptLang\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n success AS \"success!\"\n FROM v2_as_completed_job\n WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + } + }, + { + "ordinal": 2, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "success!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "a10e465cb27f7e29d921ea814dd676407f78dcf26f38c93e6097854561e5e2c0" +} diff --git a/backend/.sqlx/query-a2b7d364705468bd9dfb8f10e0bcc151604feae83d4671dfb9ca77eb8fd8d4f1.json b/backend/.sqlx/query-a2b7d364705468bd9dfb8f10e0bcc151604feae83d4671dfb9ca77eb8fd8d4f1.json new file mode 100644 index 0000000000000..fffd61d36011a --- /dev/null +++ b/backend/.sqlx/query-a2b7d364705468bd9dfb8f10e0bcc151604feae83d4671dfb9ca77eb8fd8d4f1.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job_completed WHERE workspace_id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [] + }, + "hash": "a2b7d364705468bd9dfb8f10e0bcc151604feae83d4671dfb9ca77eb8fd8d4f1" +} diff --git 
a/backend/.sqlx/query-a39199c8ce9d69784583752774941b7852d809ee6b2b39581cfc9ea1bff82c2d.json b/backend/.sqlx/query-a39199c8ce9d69784583752774941b7852d809ee6b2b39581cfc9ea1bff82c2d.json new file mode 100644 index 0000000000000..0d055a231f79b --- /dev/null +++ b/backend/.sqlx/query-a39199c8ce9d69784583752774941b7852d809ee6b2b39581cfc9ea1bff82c2d.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET args = (SELECT result FROM v2_job_completed WHERE id = $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "a39199c8ce9d69784583752774941b7852d809ee6b2b39581cfc9ea1bff82c2d" +} diff --git a/backend/.sqlx/query-a52f64d349d3f27021d0608a710be64c1244f0382e7a7f81008f6d1f37d408fb.json b/backend/.sqlx/query-a52f64d349d3f27021d0608a710be64c1244f0382e7a7f81008f6d1f37d408fb.json new file mode 100644 index 0000000000000..e4eb385fc7eba --- /dev/null +++ b/backend/.sqlx/query-a52f64d349d3f27021d0608a710be64c1244f0382e7a7f81008f6d1f37d408fb.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = JSONB_SET(\n JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4),\n ARRAY['modules', $1::TEXT, 'branchall', 'branch'],\n ((flow_status->'modules'->$1::int->'branchall'->>'branch')::int + 1)::text::jsonb\n )\n WHERE id = $2\n RETURNING (flow_status->'modules'->$1::int->'branchall'->>'branch')::int", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "int4", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid", + "Text", + "Jsonb" + ] + }, + "nullable": [ + null + ] + }, + "hash": "a52f64d349d3f27021d0608a710be64c1244f0382e7a7f81008f6d1f37d408fb" +} diff --git a/backend/.sqlx/query-a68754521bf751450602f04dd4243199a18885e1739a5a0e7f6100eab6f3c803.json b/backend/.sqlx/query-a68754521bf751450602f04dd4243199a18885e1739a5a0e7f6100eab6f3c803.json new file mode 
100644 index 0000000000000..8d6e6a2416a92 --- /dev/null +++ b/backend/.sqlx/query-a68754521bf751450602f04dd4243199a18885e1739a5a0e7f6100eab6f3c803.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_runtime (id) VALUES ($1)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "a68754521bf751450602f04dd4243199a18885e1739a5a0e7f6100eab6f3c803" +} diff --git a/backend/.sqlx/query-a6d52684b02d648294c45f563e74c577ce2a41c932dc285ddd6e82a732662e1c.json b/backend/.sqlx/query-a6d52684b02d648294c45f563e74c577ce2a41c932dc285ddd6e82a732662e1c.json new file mode 100644 index 0000000000000..d3f800d34d681 --- /dev/null +++ b/backend/.sqlx/query-a6d52684b02d648294c45f563e74c577ce2a41c932dc285ddd6e82a732662e1c.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = jsonb_set(\n jsonb_set(\n COALESCE(flow_status, '{}'::jsonb),\n array[$1],\n COALESCE(flow_status->$1, '{}'::jsonb)\n ),\n array[$1, 'started_at'],\n to_jsonb(now()::text)\n )\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "a6d52684b02d648294c45f563e74c577ce2a41c932dc285ddd6e82a732662e1c" +} diff --git a/backend/.sqlx/query-aaacf51f3a43617b60e4253549ec4cc6a3648992b590c6e0121e4d7434da516b.json b/backend/.sqlx/query-aaacf51f3a43617b60e4253549ec4cc6a3648992b590c6e0121e4d7434da516b.json new file mode 100644 index 0000000000000..5ce9a0ea5b8cb --- /dev/null +++ b/backend/.sqlx/query-aaacf51f3a43617b60e4253549ec4cc6a3648992b590c6e0121e4d7434da516b.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET created_by = $1 WHERE created_by = $2 AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "aaacf51f3a43617b60e4253549ec4cc6a3648992b590c6e0121e4d7434da516b" +} 
diff --git a/backend/.sqlx/query-ab04cda71f8e2be9acbecabe1ee5ef756b8e5c1955fbe111df9ee171dc262338.json b/backend/.sqlx/query-ab04cda71f8e2be9acbecabe1ee5ef756b8e5c1955fbe111df9ee171dc262338.json new file mode 100644 index 0000000000000..3284fb18463d0 --- /dev/null +++ b/backend/.sqlx/query-ab04cda71f8e2be9acbecabe1ee5ef756b8e5c1955fbe111df9ee171dc262338.json @@ -0,0 +1,84 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job (id, runnable_id, runnable_path, kind, script_lang, tag, created_by, permissioned_as, permissioned_as_email, workspace_id, raw_flow) (SELECT gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 FROM generate_series(1, 1)) RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Int8", + "Varchar", + { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + }, + { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + }, + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Jsonb" + ] + }, + "nullable": [ + false + ] + }, + "hash": "ab04cda71f8e2be9acbecabe1ee5ef756b8e5c1955fbe111df9ee171dc262338" +} diff --git a/backend/.sqlx/query-ab9e47e5b510e7df5a41db12896675393a6bb27f8e14245410751961218a7df5.json b/backend/.sqlx/query-ab9e47e5b510e7df5a41db12896675393a6bb27f8e14245410751961218a7df5.json new file mode 100644 index 0000000000000..868541a1754c0 --- /dev/null +++ 
b/backend/.sqlx/query-ab9e47e5b510e7df5a41db12896675393a6bb27f8e14245410751961218a7df5.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COALESCE((SELECT MIN(started_at) as min_started_at\n FROM v2_as_queue\n WHERE script_path = $1 AND job_kind != 'dependencies' AND running = true AND workspace_id = $2 AND canceled = false AND concurrent_limit > 0), $3) as min_started_at, now() AS now", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "min_started_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 1, + "name": "now", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Timestamptz" + ] + }, + "nullable": [ + null, + null + ] + }, + "hash": "ab9e47e5b510e7df5a41db12896675393a6bb27f8e14245410751961218a7df5" +} diff --git a/backend/.sqlx/query-acc0b67c8e768b524b5cfb309e4307daeb0c095e07063c57b26ab94211bf6359.json b/backend/.sqlx/query-acc0b67c8e768b524b5cfb309e4307daeb0c095e07063c57b26ab94211bf6359.json new file mode 100644 index 0000000000000..58f693df33e5f --- /dev/null +++ b/backend/.sqlx/query-acc0b67c8e768b524b5cfb309e4307daeb0c095e07063c57b26ab94211bf6359.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM v2_as_queue WHERE id = ANY($1) AND schedule_path IS NULL AND ($2::text[] IS NULL OR tag = ANY($2))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "UuidArray", + "TextArray" + ] + }, + "nullable": [ + true + ] + }, + "hash": "acc0b67c8e768b524b5cfb309e4307daeb0c095e07063c57b26ab94211bf6359" +} diff --git a/backend/.sqlx/query-b2e4dfaaee713604d0700ea4675ed2d4534e08471a60005bff85b952874c54c2.json b/backend/.sqlx/query-b2e4dfaaee713604d0700ea4675ed2d4534e08471a60005bff85b952874c54c2.json new file mode 100644 index 0000000000000..fabcec9b2a5d4 --- /dev/null +++ b/backend/.sqlx/query-b2e4dfaaee713604d0700ea4675ed2d4534e08471a60005bff85b952874c54c2.json @@ -0,0 +1,71 @@ 
+{ + "db_name": "PostgreSQL", + "query": "SELECT\n email AS \"email!\",\n created_by AS \"created_by!\",\n parent_job, permissioned_as AS \"permissioned_as!\",\n script_path, schedule_path, flow_step_id, root_job,\n scheduled_for AS \"scheduled_for!: chrono::DateTime\"\n FROM v2_as_queue WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "email!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "parent_job", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "permissioned_as!", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "schedule_path", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "flow_step_id", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "root_job", + "type_info": "Uuid" + }, + { + "ordinal": 8, + "name": "scheduled_for!: chrono::DateTime", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "b2e4dfaaee713604d0700ea4675ed2d4534e08471a60005bff85b952874c54c2" +} diff --git a/backend/.sqlx/query-b3fd0be8a7ef6d8dd8943aef45f1555d6e088cb981b230d1cbbf8e8b832cad43.json b/backend/.sqlx/query-b3fd0be8a7ef6d8dd8943aef45f1555d6e088cb981b230d1cbbf8e8b832cad43.json new file mode 100644 index 0000000000000..4c668f9b250e8 --- /dev/null +++ b/backend/.sqlx/query-b3fd0be8a7ef6d8dd8943aef45f1555d6e088cb981b230d1cbbf8e8b832cad43.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue SET\n canceled_by = $2,\n canceled_reason = 're-deployment'\n WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "b3fd0be8a7ef6d8dd8943aef45f1555d6e088cb981b230d1cbbf8e8b832cad43" 
+} diff --git a/backend/.sqlx/query-b4a9abcb38997587b28655b0f4a212a5bd4039b57fab20b163617e33a4c9dd46.json b/backend/.sqlx/query-b4a9abcb38997587b28655b0f4a212a5bd4039b57fab20b163617e33a4c9dd46.json new file mode 100644 index 0000000000000..a49baeefaff5a --- /dev/null +++ b/backend/.sqlx/query-b4a9abcb38997587b28655b0f4a212a5bd4039b57fab20b163617e33a4c9dd46.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job_runtime (id) SELECT unnest($1::uuid[])", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray" + ] + }, + "nullable": [] + }, + "hash": "b4a9abcb38997587b28655b0f4a212a5bd4039b57fab20b163617e33a4c9dd46" +} diff --git a/backend/.sqlx/query-b6c146ef8db9b4d22a895853b951fef4daa669ec43ca3f457cc2c473b8e30b08.json b/backend/.sqlx/query-b6c146ef8db9b4d22a895853b951fef4daa669ec43ca3f457cc2c473b8e30b08.json new file mode 100644 index 0000000000000..7df3060e928af --- /dev/null +++ b/backend/.sqlx/query-b6c146ef8db9b4d22a895853b951fef4daa669ec43ca3f457cc2c473b8e30b08.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT leaf_jobs->$1::text AS \"leaf_jobs: Json>\", parent_job\n FROM v2_as_queue\n WHERE COALESCE((SELECT flow_root_job FROM v2_job WHERE id = $2), $2) = id AND workspace_id = $3", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "leaf_jobs: Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "parent_job", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [ + null, + true + ] + }, + "hash": "b6c146ef8db9b4d22a895853b951fef4daa669ec43ca3f457cc2c473b8e30b08" +} diff --git a/backend/.sqlx/query-b6f4037e559e0540f0c42b292a458c1aca73c0d750f589facccfcf73f9520861.json b/backend/.sqlx/query-b6f4037e559e0540f0c42b292a458c1aca73c0d750f589facccfcf73f9520861.json new file mode 100644 index 0000000000000..353e1dcbdb99a --- /dev/null +++ 
b/backend/.sqlx/query-b6f4037e559e0540f0c42b292a458c1aca73c0d750f589facccfcf73f9520861.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(flow_status, ARRAY['approval_conditions'], $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "b6f4037e559e0540f0c42b292a458c1aca73c0d750f589facccfcf73f9520861" +} diff --git a/backend/.sqlx/query-bcc79dc05e227625bc2ba70350a41a254e00e84030aefff91a7b420fe7513b0d.json b/backend/.sqlx/query-bcc79dc05e227625bc2ba70350a41a254e00e84030aefff91a7b420fe7513b0d.json new file mode 100644 index 0000000000000..25db03d1b5fff --- /dev/null +++ b/backend/.sqlx/query-bcc79dc05e227625bc2ba70350a41a254e00e84030aefff91a7b420fe7513b0d.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT result #> $3 AS \"result: Json>\"\n FROM v2_job_completed WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + null + ] + }, + "hash": "bcc79dc05e227625bc2ba70350a41a254e00e84030aefff91a7b420fe7513b0d" +} diff --git a/backend/.sqlx/query-bd0ca94343399fa2fa06d9304da4ed90a18a3c952783c419707ffbb38e232ce0.json b/backend/.sqlx/query-bd0ca94343399fa2fa06d9304da4ed90a18a3c952783c419707ffbb38e232ce0.json new file mode 100644 index 0000000000000..2e06f25b984bc --- /dev/null +++ b/backend/.sqlx/query-bd0ca94343399fa2fa06d9304da4ed90a18a3c952783c419707ffbb38e232ce0.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT tag AS \"tag!\", count(*) AS \"count!\" FROM v2_job_queue WHERE\n scheduled_for <= now() - ('3 seconds')::interval AND running = false\n GROUP BY tag", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tag!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": 
"count!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + null + ] + }, + "hash": "bd0ca94343399fa2fa06d9304da4ed90a18a3c952783c419707ffbb38e232ce0" +} diff --git a/backend/.sqlx/query-be01bed8689bf58a7403021f8b6f149ae8c04beb9fcbff60c5ee65cf71b44927.json b/backend/.sqlx/query-be01bed8689bf58a7403021f8b6f149ae8c04beb9fcbff60c5ee65cf71b44927.json new file mode 100644 index 0000000000000..a3f065d083a14 --- /dev/null +++ b/backend/.sqlx/query-be01bed8689bf58a7403021f8b6f149ae8c04beb9fcbff60c5ee65cf71b44927.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2)\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "be01bed8689bf58a7403021f8b6f149ae8c04beb9fcbff60c5ee65cf71b44927" +} diff --git a/backend/.sqlx/query-bff39cc57aba0729ddef1d53f3806c6736556f0a14b489d6708f9879393f9ea3.json b/backend/.sqlx/query-bff39cc57aba0729ddef1d53f3806c6736556f0a14b489d6708f9879393f9ea3.json new file mode 100644 index 0000000000000..11e498f38d558 --- /dev/null +++ b/backend/.sqlx/query-bff39cc57aba0729ddef1d53f3806c6736556f0a14b489d6708f9879393f9ea3.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT usr.email, usage.executions\n FROM usr\n , LATERAL (\n SELECT COALESCE(SUM(duration_ms + 1000)/1000 , 0)::BIGINT executions\n FROM v2_as_completed_job\n WHERE workspace_id = $1\n AND job_kind NOT IN ('flow', 'flowpreview', 'flownode')\n AND email = usr.email\n AND now() - '1 week'::interval < created_at \n ) usage\n WHERE workspace_id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "email", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "executions", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + null + ] + }, + "hash": 
"bff39cc57aba0729ddef1d53f3806c6736556f0a14b489d6708f9879393f9ea3" +} diff --git a/backend/.sqlx/query-c00bae0d8c9bee37cbad4de4cb02c80d00f52a3fc32bf32271ebc90f7837abda.json b/backend/.sqlx/query-c00bae0d8c9bee37cbad4de4cb02c80d00f52a3fc32bf32271ebc90f7837abda.json new file mode 100644 index 0000000000000..74aa6c4ffb6b4 --- /dev/null +++ b/backend/.sqlx/query-c00bae0d8c9bee37cbad4de4cb02c80d00f52a3fc32bf32271ebc90f7837abda.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue\n SET canceled_by = 'timeout'\n , canceled_reason = $1\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "c00bae0d8c9bee37cbad4de4cb02c80d00f52a3fc32bf32271ebc90f7837abda" +} diff --git a/backend/.sqlx/query-c03b4c5878bd0a0c545cb66b146d0b6ceae9de456d3ddb6008707589dbe30747.json b/backend/.sqlx/query-c03b4c5878bd0a0c545cb66b146d0b6ceae9de456d3ddb6008707589dbe30747.json new file mode 100644 index 0000000000000..5cb05f49a0866 --- /dev/null +++ b/backend/.sqlx/query-c03b4c5878bd0a0c545cb66b146d0b6ceae9de456d3ddb6008707589dbe30747.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT 1 FROM v2_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "c03b4c5878bd0a0c545cb66b146d0b6ceae9de456d3ddb6008707589dbe30747" +} diff --git a/backend/.sqlx/query-c03decd33061ab912c9afed841092b05105c261976cce78c77d7f85abf9ad40c.json b/backend/.sqlx/query-c03decd33061ab912c9afed841092b05105c261976cce78c77d7f85abf9ad40c.json new file mode 100644 index 0000000000000..17532bfc77d26 --- /dev/null +++ b/backend/.sqlx/query-c03decd33061ab912c9afed841092b05105c261976cce78c77d7f85abf9ad40c.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET args = NULL WHERE id = $1", + "describe": { + "columns": [], + 
"parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "c03decd33061ab912c9afed841092b05105c261976cce78c77d7f85abf9ad40c" +} diff --git a/backend/.sqlx/query-c5259e37703c3e48104438bad6e1f3615f4439c090a75e6fde03702a21589b25.json b/backend/.sqlx/query-c5259e37703c3e48104438bad6e1f3615f4439c090a75e6fde03702a21589b25.json new file mode 100644 index 0000000000000..ee362d8845a09 --- /dev/null +++ b/backend/.sqlx/query-c5259e37703c3e48104438bad6e1f3615f4439c090a75e6fde03702a21589b25.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM v2_job WHERE parent_job = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "c5259e37703c3e48104438bad6e1f3615f4439c090a75e6fde03702a21589b25" +} diff --git a/backend/.sqlx/query-c6cab61c299163f99ebcf0338994f50c4aec75759cb928bba9e2e6a3a5b06edd.json b/backend/.sqlx/query-c6cab61c299163f99ebcf0338994f50c4aec75759cb928bba9e2e6a3a5b06edd.json new file mode 100644 index 0000000000000..d4ac2ced59a8d --- /dev/null +++ b/backend/.sqlx/query-c6cab61c299163f99ebcf0338994f50c4aec75759cb928bba9e2e6a3a5b06edd.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = JSONB_SET(\n JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2),\n ARRAY['step'],\n $3\n )\n WHERE id = $4", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Jsonb", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "c6cab61c299163f99ebcf0338994f50c4aec75759cb928bba9e2e6a3a5b06edd" +} diff --git a/backend/.sqlx/query-cf85496774cd3ce58543dd0d52b5eadedad1a29049f8023e90deb57b19a41c72.json b/backend/.sqlx/query-cf85496774cd3ce58543dd0d52b5eadedad1a29049f8023e90deb57b19a41c72.json new file mode 100644 index 0000000000000..047b758fdf9b6 --- /dev/null +++ 
b/backend/.sqlx/query-cf85496774cd3ce58543dd0d52b5eadedad1a29049f8023e90deb57b19a41c72.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COUNT(id) FROM v2_job_queue WHERE workspace_id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "cf85496774cd3ce58543dd0d52b5eadedad1a29049f8023e90deb57b19a41c72" +} diff --git a/backend/.sqlx/query-d25c58d2722ad3dcd91101ce6f66e1d802dd5d82e1cd5f5ed3a15cbc75eb6745.json b/backend/.sqlx/query-d25c58d2722ad3dcd91101ce6f66e1d802dd5d82e1cd5f5ed3a15cbc75eb6745.json new file mode 100644 index 0000000000000..2872c1655ba8f --- /dev/null +++ b/backend/.sqlx/query-d25c58d2722ad3dcd91101ce6f66e1d802dd5d82e1cd5f5ed3a15cbc75eb6745.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job_queue WHERE workspace_id = $1 AND id = $2 RETURNING 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d25c58d2722ad3dcd91101ce6f66e1d802dd5d82e1cd5f5ed3a15cbc75eb6745" +} diff --git a/backend/.sqlx/query-d2a9e6a31bab0551d32093b1afe4e5d414cf439e4db3325b5bf6bbeb86c5bd2a.json b/backend/.sqlx/query-d2a9e6a31bab0551d32093b1afe4e5d414cf439e4db3325b5bf6bbeb86c5bd2a.json new file mode 100644 index 0000000000000..4b96e7005b0d6 --- /dev/null +++ b/backend/.sqlx/query-d2a9e6a31bab0551d32093b1afe4e5d414cf439e4db3325b5bf6bbeb86c5bd2a.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH to_delete AS (\n SELECT id FROM v2_job_queue\n JOIN v2_job j USING (id)\n WHERE trigger_kind = 'schedule'\n AND trigger = $1\n AND j.workspace_id = $2\n AND flow_step_id IS NULL\n AND running = false\n FOR UPDATE\n ), deleted AS (\n DELETE FROM v2_job_queue\n WHERE id IN (SELECT id FROM to_delete)\n RETURNING id\n ) DELETE FROM 
v2_job WHERE id IN (SELECT id FROM deleted)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "d2a9e6a31bab0551d32093b1afe4e5d414cf439e4db3325b5bf6bbeb86c5bd2a" +} diff --git a/backend/.sqlx/query-d4d83d8177144c91aa489b5a42a45c83f8b069a52f681f14afb4931ac77baf45.json b/backend/.sqlx/query-d4d83d8177144c91aa489b5a42a45c83f8b069a52f681f14afb4931ac77baf45.json new file mode 100644 index 0000000000000..3422605ef1bf1 --- /dev/null +++ b/backend/.sqlx/query-d4d83d8177144c91aa489b5a42a45c83f8b069a52f681f14afb4931ac77baf45.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT id AS \"id!\", flow_status, suspend AS \"suspend!\", script_path\n FROM v2_as_queue\n WHERE id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "suspend!", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "script_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "d4d83d8177144c91aa489b5a42a45c83f8b069a52f681f14afb4931ac77baf45" +} diff --git a/backend/.sqlx/query-d6c8f4e49cf7b6db5c979c88e02bd41c7b823186ac056a0a676da85dc5d9a027.json b/backend/.sqlx/query-d6c8f4e49cf7b6db5c979c88e02bd41c7b823186ac056a0a676da85dc5d9a027.json new file mode 100644 index 0000000000000..d090c25a37e6b --- /dev/null +++ b/backend/.sqlx/query-d6c8f4e49cf7b6db5c979c88e02bd41c7b823186ac056a0a676da85dc5d9a027.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id AS \"id!\" FROM v2_as_queue WHERE schedule_path = $1 AND workspace_id = $2 AND id != $3 AND running = true", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Uuid" + ] + }, + "nullable": [ + 
true + ] + }, + "hash": "d6c8f4e49cf7b6db5c979c88e02bd41c7b823186ac056a0a676da85dc5d9a027" +} diff --git a/backend/.sqlx/query-d70ad8045589d746fc224ca950dcf967033942e489831cb5911723af1ec5161e.json b/backend/.sqlx/query-d70ad8045589d746fc224ca950dcf967033942e489831cb5911723af1ec5161e.json new file mode 100644 index 0000000000000..0873f799d6643 --- /dev/null +++ b/backend/.sqlx/query-d70ad8045589d746fc224ca950dcf967033942e489831cb5911723af1ec5161e.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT kind = 'identity' FROM v2_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d70ad8045589d746fc224ca950dcf967033942e489831cb5911723af1ec5161e" +} diff --git a/backend/.sqlx/query-d988e91087695742d75946100cf2b7593cb8eed2a97411697819849958c022b3.json b/backend/.sqlx/query-d988e91087695742d75946100cf2b7593cb8eed2a97411697819849958c022b3.json new file mode 100644 index 0000000000000..15625b26c26b1 --- /dev/null +++ b/backend/.sqlx/query-d988e91087695742d75946100cf2b7593cb8eed2a97411697819849958c022b3.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT result AS \"result!: Json>\"\n FROM v2_job_completed WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result!: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "d988e91087695742d75946100cf2b7593cb8eed2a97411697819849958c022b3" +} diff --git a/backend/.sqlx/query-d99ae453289ae967ce92a221375aeca87ed9764bfa683b9d49294c24495ad392.json b/backend/.sqlx/query-d99ae453289ae967ce92a221375aeca87ed9764bfa683b9d49294c24495ad392.json new file mode 100644 index 0000000000000..739ec473468d5 --- /dev/null +++ b/backend/.sqlx/query-d99ae453289ae967ce92a221375aeca87ed9764bfa683b9d49294c24495ad392.json @@ -0,0 
+1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT raw_flow->'failure_module' != 'null'::jsonb FROM v2_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d99ae453289ae967ce92a221375aeca87ed9764bfa683b9d49294c24495ad392" +} diff --git a/backend/.sqlx/query-da57f154108469af911579c7e021eacb8fdccab2bd7dec3fcc5c08f7685734d2.json b/backend/.sqlx/query-da57f154108469af911579c7e021eacb8fdccab2bd7dec3fcc5c08f7685734d2.json new file mode 100644 index 0000000000000..d9441deda407f --- /dev/null +++ b/backend/.sqlx/query-da57f154108469af911579c7e021eacb8fdccab2bd7dec3fcc5c08f7685734d2.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = jsonb_set(\n jsonb_set(\n COALESCE(flow_status, '{}'::jsonb),\n array[$1],\n COALESCE(flow_status->$1, '{}'::jsonb)\n ),\n array[$1, 'duration_ms'],\n to_jsonb($2::bigint)\n )\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "da57f154108469af911579c7e021eacb8fdccab2bd7dec3fcc5c08f7685734d2" +} diff --git a/backend/.sqlx/query-dc1d42c31155a03d6ecc279d08f7c5efa273188771db61fb0632c88e8b5c8558.json b/backend/.sqlx/query-dc1d42c31155a03d6ecc279d08f7c5efa273188771db61fb0632c88e8b5c8558.json new file mode 100644 index 0000000000000..386dc58ec8e3d --- /dev/null +++ b/backend/.sqlx/query-dc1d42c31155a03d6ecc279d08f7c5efa273188771db61fb0632c88e8b5c8558.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime\n SET flow_status = JSONB_SET(flow_status, ARRAY['cleanup_module', 'flow_jobs_to_clean'], COALESCE(flow_status->'cleanup_module'->'flow_jobs_to_clean', '[]'::jsonb) || $1)\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid" + ] + }, + 
"nullable": [] + }, + "hash": "dc1d42c31155a03d6ecc279d08f7c5efa273188771db61fb0632c88e8b5c8558" +} diff --git a/backend/.sqlx/query-dd5432830c1555ecbd6a0e45988fff455f3191f152e0900e09b71abb90d074ab.json b/backend/.sqlx/query-dd5432830c1555ecbd6a0e45988fff455f3191f152e0900e09b71abb90d074ab.json new file mode 100644 index 0000000000000..f7690bb33a896 --- /dev/null +++ b/backend/.sqlx/query-dd5432830c1555ecbd6a0e45988fff455f3191f152e0900e09b71abb90d074ab.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id, result AS \"result: Json>\"\n FROM v2_job_completed WHERE id = ANY($1) AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "result: Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "UuidArray", + "Text" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "dd5432830c1555ecbd6a0e45988fff455f3191f152e0900e09b71abb90d074ab" +} diff --git a/backend/.sqlx/query-defd99dd2427cdc54bb662d1ba3a1aea7f410ef204ec3465f4fb6c9acd256c95.json b/backend/.sqlx/query-defd99dd2427cdc54bb662d1ba3a1aea7f410ef204ec3465f4fb6c9acd256c95.json new file mode 100644 index 0000000000000..6aab7bc049c2e --- /dev/null +++ b/backend/.sqlx/query-defd99dd2427cdc54bb662d1ba3a1aea7f410ef204ec3465f4fb6c9acd256c95.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_queue SET\n suspend = $1,\n suspend_until = now() + interval '14 day',\n running = true\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "defd99dd2427cdc54bb662d1ba3a1aea7f410ef204ec3465f4fb6c9acd256c95" +} diff --git a/backend/.sqlx/query-df43c959e1f20cff2394c6d226439d688c979cff8c0bdd02e4ec91891bf0b3a6.json b/backend/.sqlx/query-df43c959e1f20cff2394c6d226439d688c979cff8c0bdd02e4ec91891bf0b3a6.json new file mode 100644 index 0000000000000..c8f8c1970c35c --- /dev/null +++ 
b/backend/.sqlx/query-df43c959e1f20cff2394c6d226439d688c979cff8c0bdd02e4ec91891bf0b3a6.json @@ -0,0 +1,25 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = JSONB_SET(\n JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4),\n ARRAY['modules', $1::TEXT, 'iterator', 'index'],\n ((flow_status->'modules'->$1::int->'iterator'->>'index')::int + 1)::text::jsonb\n )\n WHERE id = $2\n RETURNING (flow_status->'modules'->$1::int->'iterator'->>'index')::int", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "int4", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Uuid", + "Text", + "Jsonb" + ] + }, + "nullable": [ + null + ] + }, + "hash": "df43c959e1f20cff2394c6d226439d688c979cff8c0bdd02e4ec91891bf0b3a6" +} diff --git a/backend/.sqlx/query-e01c6ed633560647cfdf24f79298c0213ae4f66fdcc3a486120bb5116b1ce086.json b/backend/.sqlx/query-e01c6ed633560647cfdf24f79298c0213ae4f66fdcc3a486120bb5116b1ce086.json new file mode 100644 index 0000000000000..ed8554404e337 --- /dev/null +++ b/backend/.sqlx/query-e01c6ed633560647cfdf24f79298c0213ae4f66fdcc3a486120bb5116b1ce086.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_runtime SET ping = NULL WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "e01c6ed633560647cfdf24f79298c0213ae4f66fdcc3a486120bb5116b1ce086" +} diff --git a/backend/.sqlx/query-e47b00656e5a3321fefef0e02cdec4dd12900340d7ffcc076a4f969e0b73c4a8.json b/backend/.sqlx/query-e47b00656e5a3321fefef0e02cdec4dd12900340d7ffcc076a4f969e0b73c4a8.json new file mode 100644 index 0000000000000..b14b7ce59d871 --- /dev/null +++ b/backend/.sqlx/query-e47b00656e5a3321fefef0e02cdec4dd12900340d7ffcc076a4f969e0b73c4a8.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT q.id, f.flow_status, q.suspend, j.runnable_path AS script_path\n FROM v2_job_queue q\n JOIN 
v2_job j USING (id)\n JOIN v2_job_flow_runtime f USING (id)\n WHERE id = ( SELECT parent_job FROM v2_job WHERE id = $1 )\n FOR UPDATE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "flow_status", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "suspend", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "script_path", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + true + ] + }, + "hash": "e47b00656e5a3321fefef0e02cdec4dd12900340d7ffcc076a4f969e0b73c4a8" +} diff --git a/backend/.sqlx/query-e64931144cb58991c13d475cd225a7c20cf22ca7b31334a173bafafa760e1a78.json b/backend/.sqlx/query-e64931144cb58991c13d475cd225a7c20cf22ca7b31334a173bafafa760e1a78.json new file mode 100644 index 0000000000000..1c74ac9351bef --- /dev/null +++ b/backend/.sqlx/query-e64931144cb58991c13d475cd225a7c20cf22ca7b31334a173bafafa760e1a78.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_completed SET flow_status = f.flow_status FROM v2_job_flow_runtime f WHERE v2_job_completed.id = $1 AND f.id = $1 AND v2_job_completed.workspace_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [] + }, + "hash": "e64931144cb58991c13d475cd225a7c20cf22ca7b31334a173bafafa760e1a78" +} diff --git a/backend/.sqlx/query-e653d36b607a16c0dfc0324690942ab25883b53a81ebb581fe019af2ec5eb567.json b/backend/.sqlx/query-e653d36b607a16c0dfc0324690942ab25883b53a81ebb581fe019af2ec5eb567.json new file mode 100644 index 0000000000000..284cf3338fdfb --- /dev/null +++ b/backend/.sqlx/query-e653d36b607a16c0dfc0324690942ab25883b53a81ebb581fe019af2ec5eb567.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n id AS \"id!\", workspace_id AS \"workspace_id!\", parent_job, is_flow_step,\n flow_status AS \"flow_status: Box\", last_ping, same_worker\n FROM 
v2_as_queue\n WHERE running = true AND suspend = 0 AND suspend_until IS null AND scheduled_for <= now()\n AND (job_kind = 'flow' OR job_kind = 'flowpreview' OR job_kind = 'flownode')\n AND last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval\n AND canceled = false\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Uuid" + }, + { + "ordinal": 1, + "name": "workspace_id!", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "parent_job", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "is_flow_step", + "type_info": "Bool" + }, + { + "ordinal": 4, + "name": "flow_status: Box", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "last_ping", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "same_worker", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "e653d36b607a16c0dfc0324690942ab25883b53a81ebb581fe019af2ec5eb567" +} diff --git a/backend/.sqlx/query-e67509f23f769854c8ee74677b4634b90631242e3e3991659a785ff1256d5f4d.json b/backend/.sqlx/query-e67509f23f769854c8ee74677b4634b90631242e3e3991659a785ff1256d5f4d.json new file mode 100644 index 0000000000000..a7662bec957a0 --- /dev/null +++ b/backend/.sqlx/query-e67509f23f769854c8ee74677b4634b90631242e3e3991659a785ff1256d5f4d.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT canceled_by IS NOT NULL AS \"canceled!\" FROM v2_job_queue WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "canceled!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "e67509f23f769854c8ee74677b4634b90631242e3e3991659a785ff1256d5f4d" +} diff --git a/backend/.sqlx/query-e9533284529b19582eadad5c9287bcffe58dfc26ad4b6ad56390105fded5034f.json 
b/backend/.sqlx/query-e9533284529b19582eadad5c9287bcffe58dfc26ad4b6ad56390105fded5034f.json new file mode 100644 index 0000000000000..88fdeca9a3429 --- /dev/null +++ b/backend/.sqlx/query-e9533284529b19582eadad5c9287bcffe58dfc26ad4b6ad56390105fded5034f.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = JSONB_SET(\n flow_status,\n ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT],\n $4\n )\n WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "e9533284529b19582eadad5c9287bcffe58dfc26ad4b6ad56390105fded5034f" +} diff --git a/backend/.sqlx/query-ecd62c48fe2fba2fc2582e9e7ae5590d5dea8c67f6ae7b14743ac4f265dd89a3.json b/backend/.sqlx/query-ecd62c48fe2fba2fc2582e9e7ae5590d5dea8c67f6ae7b14743ac4f265dd89a3.json new file mode 100644 index 0000000000000..7431f3e1fbc05 --- /dev/null +++ b/backend/.sqlx/query-ecd62c48fe2fba2fc2582e9e7ae5590d5dea8c67f6ae7b14743ac4f265dd89a3.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM v2_job WHERE id = ANY($1)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "UuidArray" + ] + }, + "nullable": [] + }, + "hash": "ecd62c48fe2fba2fc2582e9e7ae5590d5dea8c67f6ae7b14743ac4f265dd89a3" +} diff --git a/backend/.sqlx/query-ef8413620c6860c3bf200894c5917c9209817a9cd8eb3bcd05a74f55423054ae.json b/backend/.sqlx/query-ef8413620c6860c3bf200894c5917c9209817a9cd8eb3bcd05a74f55423054ae.json new file mode 100644 index 0000000000000..58485a7cfbee0 --- /dev/null +++ b/backend/.sqlx/query-ef8413620c6860c3bf200894c5917c9209817a9cd8eb3bcd05a74f55423054ae.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT created_by AS \"created_by!\", args as \"args: sqlx::types::Json>\"\n FROM v2_job\n WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": 
"created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "args: sqlx::types::Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "ef8413620c6860c3bf200894c5917c9209817a9cd8eb3bcd05a74f55423054ae" +} diff --git a/backend/.sqlx/query-f0d3c2641924b1f1026d4dfa19290850047653549ee20870e6272cb7d4cfb9aa.json b/backend/.sqlx/query-f0d3c2641924b1f1026d4dfa19290850047653549ee20870e6272cb7d4cfb9aa.json new file mode 100644 index 0000000000000..5fa15ac703821 --- /dev/null +++ b/backend/.sqlx/query-f0d3c2641924b1f1026d4dfa19290850047653549ee20870e6272cb7d4cfb9aa.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT parent_job FROM v2_job WHERE id = $1 AND workspace_id = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "parent_job", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true + ] + }, + "hash": "f0d3c2641924b1f1026d4dfa19290850047653549ee20870e6272cb7d4cfb9aa" +} diff --git a/backend/.sqlx/query-f0fdeb7aea3e71099e7db0f4343bbd7ec86610ddc8589bf5b606fab0947c8b75.json b/backend/.sqlx/query-f0fdeb7aea3e71099e7db0f4343bbd7ec86610ddc8589bf5b606fab0947c8b75.json new file mode 100644 index 0000000000000..26111685ba907 --- /dev/null +++ b/backend/.sqlx/query-f0fdeb7aea3e71099e7db0f4343bbd7ec86610ddc8589bf5b606fab0947c8b75.json @@ -0,0 +1,91 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n v2_as_queue.job_kind AS \"job_kind!: JobKind\",\n v2_as_queue.script_hash AS \"script_hash: ScriptHash\",\n v2_as_queue.raw_flow AS \"raw_flow: sqlx::types::Json>\",\n v2_as_completed_job.parent_job AS \"parent_job: Uuid\",\n v2_as_completed_job.created_at AS \"created_at!: chrono::NaiveDateTime\",\n v2_as_completed_job.created_by AS \"created_by!\",\n v2_as_queue.script_path,\n v2_as_queue.args AS \"args: sqlx::types::Json>\"\n FROM v2_as_queue\n JOIN v2_as_completed_job 
ON v2_as_completed_job.parent_job = v2_as_queue.id\n WHERE v2_as_completed_job.id = $1 AND v2_as_completed_job.workspace_id = $2\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "job_kind!: JobKind", + "type_info": { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + "flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + } + }, + { + "ordinal": 1, + "name": "script_hash: ScriptHash", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "raw_flow: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "parent_job: Uuid", + "type_info": "Uuid" + }, + { + "ordinal": 4, + "name": "created_at!: chrono::NaiveDateTime", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "created_by!", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "script_path", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "args: sqlx::types::Json>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text" + ] + }, + "nullable": [ + true, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "f0fdeb7aea3e71099e7db0f4343bbd7ec86610ddc8589bf5b606fab0947c8b75" +} diff --git a/backend/.sqlx/query-f2e0bf4cad9c68220f1955af425de9a730290525862e4ac27dccfdc51ee10093.json b/backend/.sqlx/query-f2e0bf4cad9c68220f1955af425de9a730290525862e4ac27dccfdc51ee10093.json new file mode 100644 index 0000000000000..37048690e8e41 --- /dev/null +++ b/backend/.sqlx/query-f2e0bf4cad9c68220f1955af425de9a730290525862e4ac27dccfdc51ee10093.json @@ -0,0 +1,70 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n result #> $3 AS \"result: sqlx::types::Json>\",\n flow_status AS \"flow_status: sqlx::types::Json>\",\n language AS \"language: ScriptLang\",\n 
created_by AS \"created_by!\"\n FROM v2_as_completed_job\n WHERE id = $1 AND workspace_id = $2 AND ($4::text[] IS NULL OR tag = ANY($4))", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "result: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "flow_status: sqlx::types::Json>", + "type_info": "Jsonb" + }, + { + "ordinal": 2, + "name": "language: ScriptLang", + "type_info": { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + } + }, + { + "ordinal": 3, + "name": "created_by!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Text", + "TextArray", + "TextArray" + ] + }, + "nullable": [ + null, + true, + true, + true + ] + }, + "hash": "f2e0bf4cad9c68220f1955af425de9a730290525862e4ac27dccfdc51ee10093" +} diff --git a/backend/.sqlx/query-f3571e1d2b57011e5f6a38725eb42d909456d28a98563923cca43e760862e5e0.json b/backend/.sqlx/query-f3571e1d2b57011e5f6a38725eb42d909456d28a98563923cca43e760862e5e0.json new file mode 100644 index 0000000000000..804f35777e47a --- /dev/null +++ b/backend/.sqlx/query-f3571e1d2b57011e5f6a38725eb42d909456d28a98563923cca43e760862e5e0.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT flow_status->'user_states'->$1\n FROM v2_as_queue\n WHERE id = $2 AND workspace_id = $3\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Text", + "Uuid", + "Text" + ] + }, + "nullable": [ + null + ] + }, + "hash": "f3571e1d2b57011e5f6a38725eb42d909456d28a98563923cca43e760862e5e0" +} diff --git a/backend/.sqlx/query-f5d8c9ad5a64a7e2531bc84d26f942d30ecbf3f7097cb25f5ad5841756e3e61e.json 
b/backend/.sqlx/query-f5d8c9ad5a64a7e2531bc84d26f942d30ecbf3f7097cb25f5ad5841756e3e61e.json new file mode 100644 index 0000000000000..61a646f5a6bec --- /dev/null +++ b/backend/.sqlx/query-f5d8c9ad5a64a7e2531bc84d26f942d30ecbf3f7097cb25f5ad5841756e3e61e.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT flow_status->'failure_module'->>'parent_module' FROM v2_job_flow_runtime WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "?column?", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + null + ] + }, + "hash": "f5d8c9ad5a64a7e2531bc84d26f942d30ecbf3f7097cb25f5ad5841756e3e61e" +} diff --git a/backend/.sqlx/query-f746c5d2ffa24dd75124b5f8d04f587c9581ef407417ec979b8a3ab07816cc69.json b/backend/.sqlx/query-f746c5d2ffa24dd75124b5f8d04f587c9581ef407417ec979b8a3ab07816cc69.json new file mode 100644 index 0000000000000..c65cfb397aa98 --- /dev/null +++ b/backend/.sqlx/query-f746c5d2ffa24dd75124b5f8d04f587c9581ef407417ec979b8a3ab07816cc69.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job SET trigger = REGEXP_REPLACE(trigger,'u/' || $2 || '/(.*)','u/' || $1 || '/\\1') WHERE trigger LIKE ('u/' || $2 || '/%') AND workspace_id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "f746c5d2ffa24dd75124b5f8d04f587c9581ef407417ec979b8a3ab07816cc69" +} diff --git a/backend/.sqlx/query-fc7c71d4744a2b5d5511a04eb1bf8268962b79a6884d7011fd02fd4809eede10.json b/backend/.sqlx/query-fc7c71d4744a2b5d5511a04eb1bf8268962b79a6884d7011fd02fd4809eede10.json new file mode 100644 index 0000000000000..d811fcade7002 --- /dev/null +++ b/backend/.sqlx/query-fc7c71d4744a2b5d5511a04eb1bf8268962b79a6884d7011fd02fd4809eede10.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE v2_job_flow_runtime SET\n flow_status = JSONB_SET(\n JSONB_SET(flow_status, ARRAY['failure_module'], $1),\n 
ARRAY['step'],\n $2\n )\n WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Jsonb", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "fc7c71d4744a2b5d5511a04eb1bf8268962b79a6884d7011fd02fd4809eede10" +} diff --git a/backend/.sqlx/query-fdedd3909a97db5d43d9c46ff77b800b8efd647121b538deb023f96dbaac3715.json b/backend/.sqlx/query-fdedd3909a97db5d43d9c46ff77b800b8efd647121b538deb023f96dbaac3715.json new file mode 100644 index 0000000000000..905707255f29e --- /dev/null +++ b/backend/.sqlx/query-fdedd3909a97db5d43d9c46ff77b800b8efd647121b538deb023f96dbaac3715.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT args AS \"args: Json>>\"\n FROM v2_job WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "args: Json>>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [ + true + ] + }, + "hash": "fdedd3909a97db5d43d9c46ff77b800b8efd647121b538deb023f96dbaac3715" +} diff --git a/backend/.sqlx/query-ff0403790674cdb07022af71c2377afbd8b3a660b3be27514b517c077c63c238.json b/backend/.sqlx/query-ff0403790674cdb07022af71c2377afbd8b3a660b3be27514b517c077c63c238.json new file mode 100644 index 0000000000000..6a4a3b3d75a56 --- /dev/null +++ b/backend/.sqlx/query-ff0403790674cdb07022af71c2377afbd8b3a660b3be27514b517c077c63c238.json @@ -0,0 +1,84 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO v2_job (id, runnable_id, runnable_path, kind, script_lang, tag, created_by, permissioned_as, permissioned_as_email, workspace_id) (SELECT gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9 FROM generate_series(1, $10)) RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Uuid" + } + ], + "parameters": { + "Left": [ + "Int8", + "Varchar", + { + "Custom": { + "name": "job_kind", + "kind": { + "Enum": [ + "script", + "preview", + "flow", + "dependencies", + "flowpreview", + "script_hub", + "identity", + 
"flowdependencies", + "http", + "graphql", + "postgresql", + "noop", + "appdependencies", + "deploymentcallback", + "singlescriptflow", + "flowscript", + "flownode", + "appscript" + ] + } + } + }, + { + "Custom": { + "name": "script_lang", + "kind": { + "Enum": [ + "python3", + "deno", + "go", + "bash", + "postgresql", + "nativets", + "bun", + "mysql", + "bigquery", + "snowflake", + "graphql", + "powershell", + "mssql", + "php", + "bunnative", + "rust", + "ansible", + "csharp", + "oracledb" + ] + } + } + }, + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "ff0403790674cdb07022af71c2377afbd8b3a660b3be27514b517c077c63c238" +} diff --git a/backend/ee-repo-ref.txt b/backend/ee-repo-ref.txt index 9f385d12b5e6a..764140a73e6cf 100644 --- a/backend/ee-repo-ref.txt +++ b/backend/ee-repo-ref.txt @@ -1 +1 @@ -0c89b8974ff6e1c9eda2134f09d4a03f18b57c15 +8201cd10e92992915ddd9ecd26df92347a14a2fb diff --git a/backend/migrations/20250117145630_v2_queue_compatibility_view.down.sql b/backend/migrations/20250117145630_v2_queue_compatibility_view.down.sql new file mode 100644 index 0000000000000..23bb72031f378 --- /dev/null +++ b/backend/migrations/20250117145630_v2_queue_compatibility_view.down.sql @@ -0,0 +1,2 @@ +-- Add down migration script here +DROP VIEW v2_as_queue; diff --git a/backend/migrations/20250117145630_v2_queue_compatibility_view.up.sql b/backend/migrations/20250117145630_v2_queue_compatibility_view.up.sql new file mode 100644 index 0000000000000..9f90d178d0b3d --- /dev/null +++ b/backend/migrations/20250117145630_v2_queue_compatibility_view.up.sql @@ -0,0 +1,50 @@ +-- Add up migration script here +CREATE OR REPLACE VIEW v2_as_queue AS +SELECT + j.id, + j.workspace_id, + j.parent_job, + j.created_by, + j.created_at, + q.started_at, + q.scheduled_for, + q.running, + j.runnable_id AS script_hash, + j.runnable_path AS script_path, + j.args, + j.raw_code, + q.canceled_by IS NOT NULL AS canceled, + 
q.canceled_by, + q.canceled_reason, + r.ping AS last_ping, + j.kind AS job_kind, + CASE WHEN j.trigger_kind = 'schedule'::job_trigger_kind THEN j.trigger END + AS schedule_path, + j.permissioned_as, + f.flow_status, + j.raw_flow, + j.flow_step_id IS NOT NULL AS is_flow_step, + j.script_lang AS language, + q.suspend, + q.suspend_until, + j.same_worker, + j.raw_lock, + j.pre_run_error, + j.permissioned_as_email AS email, + j.visible_to_owner, + r.memory_peak AS mem_peak, + j.flow_root_job AS root_job, + f.leaf_jobs, + j.tag, + j.concurrent_limit, + j.concurrency_time_window_s, + j.timeout, + j.flow_step_id, + j.cache_ttl, + j.priority, + NULL::TEXT AS logs +FROM v2_job_queue q + JOIN v2_job j USING (id) + LEFT JOIN v2_job_runtime r USING (id) + LEFT JOIN v2_job_flow_runtime f USING (id) +; diff --git a/backend/migrations/20250117145631_v2_completed_job_compatibility_view.down.sql b/backend/migrations/20250117145631_v2_completed_job_compatibility_view.down.sql new file mode 100644 index 0000000000000..90ce271f4a094 --- /dev/null +++ b/backend/migrations/20250117145631_v2_completed_job_compatibility_view.down.sql @@ -0,0 +1,2 @@ +-- Add down migration script here +DROP VIEW v2_as_completed_job; diff --git a/backend/migrations/20250117145631_v2_completed_job_compatibility_view.up.sql b/backend/migrations/20250117145631_v2_completed_job_compatibility_view.up.sql new file mode 100644 index 0000000000000..6d04a518ae124 --- /dev/null +++ b/backend/migrations/20250117145631_v2_completed_job_compatibility_view.up.sql @@ -0,0 +1,39 @@ +-- Add up migration script here +CREATE OR REPLACE VIEW v2_as_completed_job AS +SELECT + j.id, + j.workspace_id, + j.parent_job, + j.created_by, + j.created_at, + c.duration_ms, + c.status = 'success' AS success, + j.runnable_id AS script_hash, + j.runnable_path AS script_path, + j.args, + c.result, + FALSE AS deleted, + j.raw_code, + c.status = 'canceled' AS canceled, + c.canceled_by, + c.canceled_reason, + j.kind AS job_kind, + CASE WHEN
j.trigger_kind = 'schedule'::job_trigger_kind THEN j.trigger END + AS schedule_path, + j.permissioned_as, + c.flow_status, + j.raw_flow, + j.flow_step_id IS NOT NULL AS is_flow_step, + j.script_lang AS language, + c.started_at, + c.status = 'skipped' AS is_skipped, + j.raw_lock, + j.permissioned_as_email AS email, + j.visible_to_owner, + c.memory_peak AS mem_peak, + j.tag, + j.priority, + NULL::TEXT AS logs +FROM v2_job_completed c + JOIN v2_job j USING (id) +; diff --git a/backend/src/monitor.rs b/backend/src/monitor.rs index 30d7dedda92a3..718e741a68314 100644 --- a/backend/src/monitor.rs +++ b/backend/src/monitor.rs @@ -736,7 +736,13 @@ pub async fn delete_expired_items(db: &DB) -> () { match db.begin().await { Ok(mut tx) => { let deleted_jobs = sqlx::query_scalar!( - "DELETE FROM completed_job WHERE created_at <= now() - ($1::bigint::text || ' s')::interval AND started_at + ((duration_ms/1000 + $1::bigint) || ' s')::interval <= now() RETURNING id AS \"id!\"", + "DELETE FROM v2_job_completed c + USING v2_job j + WHERE + created_at <= now() - ($1::bigint::text || ' s')::interval + AND completed_at + ($1::bigint::text || ' s')::interval <= now() + AND c.id = j.id + RETURNING c.id", job_retention_secs ) .fetch_all(&mut *tx) @@ -788,7 +794,7 @@ pub async fn delete_expired_items(db: &DB) -> () { } if let Err(e) = - sqlx::query!("DELETE FROM job WHERE id = ANY($1)", &deleted_jobs) + sqlx::query!("DELETE FROM v2_job WHERE id = ANY($1)", &deleted_jobs) .execute(&mut *tx) .await { @@ -1342,13 +1348,19 @@ pub async fn expose_queue_metrics(db: &Pool) { .ok(); if count > 0 { sqlx::query!( - "INSERT INTO metrics (id, value) - VALUES ($1, to_jsonb((SELECT EXTRACT(EPOCH FROM now() - scheduled_for) - FROM queue WHERE tag = $2 AND running = false AND scheduled_for <= now() - ('3 seconds')::interval - ORDER BY priority DESC NULLS LAST, scheduled_for LIMIT 1)))", - format!("queue_delay_{}", tag), - tag - ).execute(db).await.ok(); + "INSERT INTO metrics (id, value) + VALUES ($1, 
to_jsonb(( + SELECT EXTRACT(EPOCH FROM now() - scheduled_for) + FROM v2_job_queue + WHERE tag = $2 AND running = false AND scheduled_for <= now() - ('3 seconds')::interval + ORDER BY priority DESC NULLS LAST, scheduled_for LIMIT 1 + )))", + format!("queue_delay_{}", tag), + tag + ) + .execute(db) + .await + .ok(); } } } @@ -1507,9 +1519,14 @@ pub async fn reload_base_url_setting(db: &DB) -> error::Result<()> { async fn handle_zombie_jobs(db: &Pool, base_internal_url: &str, worker_name: &str) { if *RESTART_ZOMBIE_JOBS { let restarted = sqlx::query!( - "UPDATE queue SET running = false, started_at = null - WHERE last_ping < now() - ($1 || ' seconds')::interval - AND running = true AND job_kind NOT IN ('flow', 'flowpreview', 'flownode', 'singlescriptflow') AND same_worker = false RETURNING id AS \"id!\", workspace_id AS \"workspace_id!\", last_ping", + "UPDATE v2_job_queue q SET running = false, started_at = null + FROM v2_job j, v2_job_runtime r + WHERE j.id = q.id AND j.id = r.id + AND ping < now() - ($1 || ' seconds')::interval + AND running = true + AND kind NOT IN ('flow', 'flowpreview', 'flownode', 'singlescriptflow') + AND same_worker = false + RETURNING q.id, q.workspace_id, ping", *ZOMBIE_JOB_TIMEOUT, ) .fetch_all(db) @@ -1524,7 +1541,7 @@ async fn handle_zombie_jobs(db: &Pool, base_internal_url: &str, worker let base_url = BASE_URL.read().await.clone(); for r in restarted { - let last_ping = if let Some(x) = r.last_ping { + let last_ping = if let Some(x) = r.ping { format!("last ping at {x}") } else { "no last ping".to_string() @@ -1536,16 +1553,21 @@ async fn handle_zombie_jobs(db: &Pool, base_internal_url: &str, worker ); let _ = sqlx::query!(" - INSERT INTO job_logs (job_id, logs) VALUES ($1,'Restarted job after not receiving job''s ping for too long the ' || now() || '\n\n') - ON CONFLICT (job_id) DO UPDATE SET logs = job_logs.logs || '\nRestarted job after not receiving job''s ping for too long the ' || now() || '\n\n' WHERE job_logs.job_id = $1", r.id) 
- .execute(db).await; + INSERT INTO job_logs (job_id, logs) + VALUES ($1, 'Restarted job after not receiving job''s ping for too long the ' || now() || '\n\n') + ON CONFLICT (job_id) DO UPDATE SET logs = job_logs.logs || '\n' || EXCLUDED.logs + WHERE job_logs.job_id = $1", + r.id + ) + .execute(db) + .await; tracing::error!(error_message); report_critical_error(error_message, db.clone(), Some(&r.workspace_id), None).await; } } let mut timeout_query = - "SELECT * FROM queue WHERE last_ping < now() - ($1 || ' seconds')::interval + "SELECT * FROM v2_as_queue WHERE last_ping < now() - ($1 || ' seconds')::interval AND running = true AND job_kind NOT IN ('flow', 'flowpreview', 'flownode', 'singlescriptflow')" .to_string(); if *RESTART_ZOMBIE_JOBS { @@ -1631,7 +1653,7 @@ async fn handle_zombie_flows(db: &DB) -> error::Result<()> { SELECT id AS "id!", workspace_id AS "workspace_id!", parent_job, is_flow_step, flow_status AS "flow_status: Box", last_ping, same_worker - FROM queue + FROM v2_as_queue WHERE running = true AND suspend = 0 AND suspend_until IS null AND scheduled_for <= now() AND (job_kind = 'flow' OR job_kind = 'flowpreview' OR job_kind = 'flownode') AND last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval @@ -1662,7 +1684,10 @@ async fn handle_zombie_flows(db: &DB) -> error::Result<()> { report_critical_error(error_message, db.clone(), Some(&flow.workspace_id), None).await; // if the flow hasn't started and is a zombie, we can simply restart it sqlx::query!( - "UPDATE queue SET running = false, started_at = null WHERE id = $1 AND canceled = false", + "UPDATE v2_job_queue SET + running = false, + started_at = null + WHERE id = $1 AND canceled_by IS NULL", flow.id ) .execute(db) @@ -1689,8 +1714,8 @@ async fn handle_zombie_flows(db: &DB) -> error::Result<()> { DELETE FROM parallel_monitor_lock WHERE last_ping IS NOT NULL AND last_ping < NOW() - ($1 || ' seconds')::interval - RETURNING parent_flow_id, job_id, last_ping, (SELECT workspace_id 
FROM queue q - WHERE q.id = parent_flow_id AND q.running = true AND q.canceled = false) AS workspace_id + RETURNING parent_flow_id, job_id, last_ping, (SELECT workspace_id FROM v2_job_queue q + WHERE q.id = parent_flow_id AND q.running = true AND q.canceled_by IS NULL) AS workspace_id "#, FLOW_ZOMBIE_TRANSITION_TIMEOUT.as_str() ) diff --git a/backend/tests/fixtures/base.sql b/backend/tests/fixtures/base.sql index e6e4f9875a01c..b0dfd05c082a2 100644 --- a/backend/tests/fixtures/base.sql +++ b/backend/tests/fixtures/base.sql @@ -23,7 +23,7 @@ GRANT ALL PRIVILEGES ON TABLE workspace_key TO windmill_user; CREATE FUNCTION "notify_insert_on_completed_job" () RETURNS TRIGGER AS $$ BEGIN - PERFORM pg_notify('insert on completed_job', NEW.id::text); + PERFORM pg_notify('completed', NEW.id::text); RETURN NEW; END; $$ LANGUAGE PLPGSQL; @@ -37,7 +37,7 @@ EXECUTE FUNCTION "notify_insert_on_completed_job" (); CREATE FUNCTION "notify_queue" () RETURNS TRIGGER AS $$ BEGIN - PERFORM pg_notify('queue', NEW.id::text); + PERFORM pg_notify('queued', NEW.id::text); RETURN NEW; END; $$ LANGUAGE PLPGSQL; diff --git a/backend/tests/fixtures/result_format.sql b/backend/tests/fixtures/result_format.sql index 1f990e2d860de..0d70e56370d5a 100644 --- a/backend/tests/fixtures/result_format.sql +++ b/backend/tests/fixtures/result_format.sql @@ -1,3 +1,9 @@ +INSERT INTO public.v2_job ( + id, workspace_id, created_by, kind, script_lang +) VALUES ( + '1eecb96a-c8b0-4a3d-b1b6-087878c55e41', 'test-workspace', 'test-user', 'script', 'postgresql' +); + INSERT INTO public.completed_job ( id, workspace_id, created_by, created_at, duration_ms, success, flow_status, result, job_kind, language ) VALUES ( diff --git a/backend/tests/worker.rs b/backend/tests/worker.rs index 3f1c3f16c8809..d2ad1512ae98a 100644 --- a/backend/tests/worker.rs +++ b/backend/tests/worker.rs @@ -189,7 +189,6 @@ async fn set_jwt_secret() -> () { mod suspend_resume { use serde_json::json; - use sqlx::query_scalar; use super::*; @@ 
-200,11 +199,13 @@ mod suspend_resume { ) { loop { queue.by_ref().find(&flow).await.unwrap(); - if query_scalar("SELECT suspend > 0 FROM queue WHERE id = $1") - .bind(flow) - .fetch_one(db) - .await - .unwrap() + if sqlx::query_scalar!( + "SELECT suspend > 0 AS \"r!\" FROM v2_job_queue WHERE id = $1", + flow + ) + .fetch_one(db) + .await + .unwrap() { break; } @@ -358,7 +359,7 @@ mod suspend_resume { // ensure resumes are cleaned up through CASCADE when the flow is finished assert_eq!( 0, - query_scalar::<_, i64>("SELECT count(*) FROM resume_job") + sqlx::query_scalar!("SELECT count(*) AS \"count!\" FROM resume_job") .fetch_one(&db) .await .unwrap() @@ -925,7 +926,7 @@ impl RunJob { /* root job */ None, /* job_id */ None, /* is_flow_step */ false, - /* running */ false, + /* same_worker */ false, None, true, None, @@ -1047,11 +1048,11 @@ fn spawn_test_worker( } async fn listen_for_completed_jobs(db: &Pool) -> impl Stream + Unpin { - listen_for_uuid_on(db, "insert on completed_job").await + listen_for_uuid_on(db, "completed").await } async fn listen_for_queue(db: &Pool) -> impl Stream + Unpin { - listen_for_uuid_on(db, "queue").await + listen_for_uuid_on(db, "queued").await } async fn listen_for_uuid_on( @@ -1076,7 +1077,7 @@ async fn listen_for_uuid_on( async fn completed_job(uuid: Uuid, db: &Pool) -> CompletedJob { sqlx::query_as::<_, CompletedJob>( - "SELECT *, result->'wm_labels' as labels FROM completed_job WHERE id = $1", + "SELECT *, result->'wm_labels' as labels FROM v2_as_completed_job WHERE id = $1", ) .bind(uuid) .fetch_one(db) @@ -3188,11 +3189,13 @@ async fn test_script_schedule_handlers(db: Pool) { let uuid = uuid.unwrap().unwrap(); - let completed_job = - sqlx::query!("SELECT script_path FROM completed_job WHERE id = $1", uuid) - .fetch_one(&db2) - .await - .unwrap(); + let completed_job = sqlx::query!( + "SELECT script_path FROM v2_as_completed_job WHERE id = $1", + uuid + ) + .fetch_one(&db2) + .await + .unwrap(); if 
completed_job.script_path.is_none() || completed_job.script_path != Some("f/system/schedule_error_handler".to_string()) @@ -3257,7 +3260,7 @@ async fn test_script_schedule_handlers(db: Pool) { let uuid = uuid.unwrap().unwrap(); let completed_job = - sqlx::query!("SELECT script_path FROM completed_job WHERE id = $1", uuid) + sqlx::query!("SELECT script_path FROM v2_as_completed_job WHERE id = $1", uuid) .fetch_one(&db2) .await .unwrap(); @@ -3340,11 +3343,13 @@ async fn test_flow_schedule_handlers(db: Pool) { let uuid = uuid.unwrap().unwrap(); - let completed_job = - sqlx::query!("SELECT script_path FROM completed_job WHERE id = $1", uuid) - .fetch_one(&db2) - .await - .unwrap(); + let completed_job = sqlx::query!( + "SELECT script_path FROM v2_as_completed_job WHERE id = $1", + uuid + ) + .fetch_one(&db2) + .await + .unwrap(); if completed_job.script_path.is_none() || completed_job.script_path != Some("f/system/schedule_error_handler".to_string()) @@ -3410,7 +3415,7 @@ async fn test_flow_schedule_handlers(db: Pool) { let uuid = uuid.unwrap().unwrap(); let completed_job = - sqlx::query!("SELECT script_path FROM completed_job WHERE id = $1", uuid) + sqlx::query!("SELECT script_path FROM v2_as_completed_job WHERE id = $1", uuid) .fetch_one(&db2) .await .unwrap(); diff --git a/backend/windmill-api/src/apps.rs b/backend/windmill-api/src/apps.rs index f2e340394edfd..447bc922c3b03 100644 --- a/backend/windmill-api/src/apps.rs +++ b/backend/windmill-api/src/apps.rs @@ -16,7 +16,6 @@ use crate::{ webhook_util::{WebhookMessage, WebhookShared}, HTTP_CLIENT, }; -use windmill_common::variables::encrypt; #[cfg(feature = "parquet")] use crate::{ job_helpers_ee::{ @@ -54,6 +53,7 @@ use windmill_audit::audit_ee::audit_log; use windmill_audit::ActionKind; #[cfg(feature = "parquet")] use windmill_common::s3_helpers::build_object_store_client; +use windmill_common::variables::encrypt; use windmill_common::{ apps::{AppScriptId, ListAppQuery}, cache::{self, future::FutureCachedExt}, @@ 
-1794,7 +1794,7 @@ async fn check_if_allowed_to_access_s3_file_from_app( let allowed = opt_authed.is_some() || sqlx::query_scalar!( r#"SELECT EXISTS ( - SELECT 1 FROM completed_job + SELECT 1 FROM v2_as_completed_job WHERE workspace_id = $2 AND (job_kind = 'appscript' OR job_kind = 'preview') AND created_by = 'anonymous' diff --git a/backend/windmill-api/src/concurrency_groups.rs b/backend/windmill-api/src/concurrency_groups.rs index 945df75573a04..f6cc6b081680e 100644 --- a/backend/windmill-api/src/concurrency_groups.rs +++ b/backend/windmill-api/src/concurrency_groups.rs @@ -157,22 +157,22 @@ async fn get_concurrent_intervals( let lq = ListCompletedQuery { order_desc: Some(true), ..lq }; let lqc = lq.clone(); let lqq: ListQueueQuery = lqc.into(); - let mut sqlb_q = SqlBuilder::select_from("queue") + let mut sqlb_q = SqlBuilder::select_from("v2_as_queue") .fields(UnifiedJob::queued_job_fields()) .order_by("created_at", lq.order_desc.unwrap_or(true)) .limit(row_limit) .clone(); - let mut sqlb_c = SqlBuilder::select_from("completed_job") + let mut sqlb_c = SqlBuilder::select_from("v2_as_completed_job") .fields(UnifiedJob::completed_job_fields()) .order_by("started_at", lq.order_desc.unwrap_or(true)) .limit(row_limit) .clone(); - let mut sqlb_q_user = SqlBuilder::select_from("queue") + let mut sqlb_q_user = SqlBuilder::select_from("v2_as_queue") .fields(&["id"]) .order_by("created_at", lq.order_desc.unwrap_or(true)) .limit(row_limit) .clone(); - let mut sqlb_c_user = SqlBuilder::select_from("completed_job") + let mut sqlb_c_user = SqlBuilder::select_from("v2_as_completed_job") .fields(&["id"]) .order_by("started_at", lq.order_desc.unwrap_or(true)) .limit(row_limit) diff --git a/backend/windmill-api/src/flows.rs b/backend/windmill-api/src/flows.rs index 1b283d43f361e..7e65c003d7081 100644 --- a/backend/windmill-api/src/flows.rs +++ b/backend/windmill-api/src/flows.rs @@ -913,8 +913,12 @@ async fn update_flow( })?; if let Some(old_dep_job) = old_dep_job { sqlx::query!( 
- "UPDATE queue SET canceled = true WHERE id = $1", - old_dep_job + "UPDATE v2_job_queue SET + canceled_by = $2, + canceled_reason = 're-deployment' + WHERE id = $1", + old_dep_job, + &authed.username ) .execute(&mut *new_tx) .await diff --git a/backend/windmill-api/src/inputs.rs b/backend/windmill-api/src/inputs.rs index 1271be1620a39..1ce3fa922629b 100644 --- a/backend/windmill-api/src/inputs.rs +++ b/backend/windmill-api/src/inputs.rs @@ -133,7 +133,7 @@ async fn get_input_history( let mut tx = user_db.begin(&authed).await?; let sql = &format!( - "select id, created_at, created_by, 'null'::jsonb as args, success from completed_job \ + "select id, created_at, created_by, 'null'::jsonb as args, success from v2_as_completed_job \ where {} = $1 and job_kind = any($2) and workspace_id = $3 \ order by created_at desc limit $4 offset $5", r.runnable_type.column_name() @@ -204,16 +204,16 @@ async fn get_args_from_history_or_saved_input( let result_o = if let Some(input) = g.input { if input { sqlx::query_scalar!( - "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM input WHERE id = $1 AND workspace_id = $2", - job_or_input_id, - w_id, - g.allow_large.unwrap_or(true) - ) - .fetch_optional(&mut *tx) - .await? + "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM input WHERE id = $1 AND workspace_id = $2", + job_or_input_id, + w_id, + g.allow_large.unwrap_or(true) + ) + .fetch_optional(&mut *tx) + .await? 
} else { sqlx::query_scalar!( - "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM completed_job WHERE id = $1 AND workspace_id = $2", + "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM v2_job WHERE id = $1 AND workspace_id = $2", job_or_input_id, w_id, g.allow_large.unwrap_or(true) @@ -223,13 +223,15 @@ async fn get_args_from_history_or_saved_input( } } else { sqlx::query_scalar!( - "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM completed_job WHERE id = $1 AND workspace_id = $2 UNION ALL SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM input WHERE id = $1 AND workspace_id = $2", - job_or_input_id, - w_id, - g.allow_large.unwrap_or(true) - ) - .fetch_optional(&mut *tx) - .await? + "SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM v2_job WHERE id = $1 AND workspace_id = $2 + UNION ALL + SELECT CASE WHEN pg_column_size(args) < 40000 OR $3 THEN args ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as args FROM input WHERE id = $1 AND workspace_id = $2", + job_or_input_id, + w_id, + g.allow_large.unwrap_or(true) + ) + .fetch_optional(&mut *tx) + .await? }; tx.commit().await?; diff --git a/backend/windmill-api/src/job_metrics.rs b/backend/windmill-api/src/job_metrics.rs index 2af59383a89af..882ec30ef414f 100644 --- a/backend/windmill-api/src/job_metrics.rs +++ b/backend/windmill-api/src/job_metrics.rs @@ -171,7 +171,7 @@ async fn set_job_progress( if let Some(flow_job_id) = flow_job_id { // TODO: Return error if trying to set completed job? 
sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(flow_status, ARRAY['modules', flow_status->>'step', 'progress'], $1) WHERE id = $2", serde_json::json!(percent.clamp(0, 99)), diff --git a/backend/windmill-api/src/jobs.rs b/backend/windmill-api/src/jobs.rs index 2eb119fd083c3..8715de3c3c649 100644 --- a/backend/windmill-api/src/jobs.rs +++ b/backend/windmill-api/src/jobs.rs @@ -338,7 +338,7 @@ async fn compute_root_job_for_flow(db: &DB, w_id: &str, mut job_id: Uuid) -> err // TODO: use `root_job` ? loop { job_id = match sqlx::query_scalar!( - "SELECT parent_job FROM queue WHERE id = $1 AND workspace_id = $2", + "SELECT parent_job FROM v2_job WHERE id = $1 AND workspace_id = $2", job_id, w_id ) @@ -605,7 +605,7 @@ async fn get_flow_job_debug_info( let mut job_ids = vec![]; let jobs_with_root = sqlx::query_scalar!( - "SELECT id AS \"id!\" FROM queue WHERE workspace_id = $1 and root_job = $2", + "SELECT id FROM v2_job WHERE workspace_id = $1 and flow_root_job = $2", &w_id, &id, ) @@ -675,23 +675,23 @@ async fn get_job( } macro_rules! 
get_job_query { - ("completed_job_view", $($opts:tt)*) => { + ("v2_as_completed_job", $($opts:tt)*) => { get_job_query!( - @impl "completed_job_view", ($($opts)*), + @impl "v2_as_completed_job", ($($opts)*), "duration_ms, success, result, deleted, is_skipped, result->'wm_labels' as labels, \ CASE WHEN result is null or pg_column_size(result) < 90000 THEN result ELSE '\"WINDMILL_TOO_BIG\"'::jsonb END as result", ) }; - ("queue_view", $($opts:tt)*) => { + ("v2_as_queue", $($opts:tt)*) => { get_job_query!( - @impl "queue_view", ($($opts)*), + @impl "v2_as_queue", ($($opts)*), "scheduled_for, running, last_ping, suspend, suspend_until, same_worker, pre_run_error, visible_to_owner, \ root_job, leaf_jobs, concurrent_limit, concurrency_time_window_s, timeout, flow_step_id, cache_ttl", ) }; (@impl $table:literal, (with_logs: $with_logs:expr, $($rest:tt)*), $additional_fields:literal, $($args:tt)*) => { if $with_logs { - get_job_query!(@impl $table, ($($rest)*), $additional_fields, logs = const_format::formatcp!("right({}.logs, 20000)", $table), $($args)*) + get_job_query!(@impl $table, ($($rest)*), $additional_fields, logs = "right(job_logs.logs, 20000)", $($args)*) } else { get_job_query!(@impl $table, ($($rest)*), $additional_fields, logs = "null", $($args)*) } @@ -718,7 +718,7 @@ macro_rules! 
get_job_query { {logs} as logs, {code} as raw_code, canceled, canceled_by, canceled_reason, job_kind, \ schedule_path, permissioned_as, flow_status, {flow} as raw_flow, is_flow_step, language, \ {lock} as raw_lock, email, visible_to_owner, mem_peak, tag, priority, {additional_fields} \ - FROM {table} \ + FROM {table} LEFT JOIN job_logs ON id = job_id \ WHERE id = $1 AND {table}.workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3)) LIMIT 1", table = $table, additional_fields = $additional_fields, @@ -832,7 +832,7 @@ impl<'a> GetQuery<'a> { job_id: Uuid, workspace_id: &str, ) -> error::Result>> { - let query = get_job_query!("queue_view", + let query = get_job_query!("v2_as_queue", with_logs: self.with_logs, with_code: self.with_code, with_flow: self.with_flow, @@ -864,7 +864,7 @@ impl<'a> GetQuery<'a> { job_id: Uuid, workspace_id: &str, ) -> error::Result>> { - let query = get_job_query!("completed_job_view", + let query = get_job_query!("v2_as_completed_job", with_logs: self.with_logs, with_code: self.with_code, with_flow: self.with_flow, @@ -1012,10 +1012,10 @@ async fn get_job_logs( .flatten(); let record = sqlx::query!( - "SELECT created_by AS \"created_by!\", CONCAT(coalesce(completed_job.logs, ''), coalesce(job_logs.logs, '')) as logs, job_logs.log_offset, job_logs.log_file_index - FROM completed_job - LEFT JOIN job_logs ON job_logs.job_id = completed_job.id - WHERE completed_job.id = $1 AND completed_job.workspace_id = $2 AND ($3::text[] IS NULL OR completed_job.tag = ANY($3))", + "SELECT created_by AS \"created_by!\", CONCAT(coalesce(v2_as_completed_job.logs, ''), coalesce(job_logs.logs, '')) as logs, job_logs.log_offset, job_logs.log_file_index + FROM v2_as_completed_job + LEFT JOIN job_logs ON job_logs.job_id = v2_as_completed_job.id + WHERE v2_as_completed_job.id = $1 AND v2_as_completed_job.workspace_id = $2 AND ($3::text[] IS NULL OR v2_as_completed_job.tag = ANY($3))", id, w_id, tags.as_ref().map(|v| v.as_slice()) @@ -1049,10 +1049,10 @@ async fn 
get_job_logs( Ok(content_plain(Body::from(logs))) } else { let text = sqlx::query!( - "SELECT created_by AS \"created_by!\", CONCAT(coalesce(queue.logs, ''), coalesce(job_logs.logs, '')) as logs, coalesce(job_logs.log_offset, 0) as log_offset, job_logs.log_file_index - FROM queue - LEFT JOIN job_logs ON job_logs.job_id = queue.id - WHERE queue.id = $1 AND queue.workspace_id = $2 AND ($3::text[] IS NULL OR queue.tag = ANY($3))", + "SELECT created_by AS \"created_by!\", CONCAT(coalesce(v2_as_queue.logs, ''), coalesce(job_logs.logs, '')) as logs, coalesce(job_logs.log_offset, 0) as log_offset, job_logs.log_file_index + FROM v2_as_queue + LEFT JOIN job_logs ON job_logs.job_id = v2_as_queue.id + WHERE v2_as_queue.id = $1 AND v2_as_queue.workspace_id = $2 AND ($3::text[] IS NULL OR v2_as_queue.tag = ANY($3))", id, w_id, tags.as_ref().map(|v| v.as_slice()) @@ -1101,7 +1101,7 @@ async fn get_args( .flatten(); let record = sqlx::query!( "SELECT created_by AS \"created_by!\", args as \"args: sqlx::types::Json>\" - FROM completed_job + FROM v2_as_completed_job WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", id, &w_id, @@ -1123,7 +1123,7 @@ async fn get_args( } else { let record = sqlx::query!( "SELECT created_by AS \"created_by!\", args as \"args: sqlx::types::Json>\" - FROM queue + FROM v2_job WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", id, &w_id, @@ -1396,7 +1396,7 @@ pub fn list_queue_jobs_query( tags: Option>, ) -> SqlBuilder { let (limit, offset) = paginate_without_limits(pagination); - let mut sqlb = SqlBuilder::select_from("queue") + let mut sqlb = SqlBuilder::select_from("v2_as_queue") .fields(fields) .order_by("created_at", lq.order_desc.unwrap_or(true)) .limit(limit) @@ -1485,75 +1485,36 @@ async fn cancel_jobs( ) -> error::JsonResult> { let mut uuids = vec![]; let mut tx = db.begin().await?; - let trivial_jobs = sqlx::query!("INSERT INTO completed_job AS cj + let trivial_jobs = sqlx::query!("INSERT 
INTO v2_job_completed AS cj ( workspace_id , id - , parent_job - , created_by - , created_at - , started_at , duration_ms - , success - , script_hash - , script_path - , args , result - , raw_code - , raw_lock - , canceled , canceled_by , canceled_reason - , job_kind - , schedule_path - , permissioned_as , flow_status - , raw_flow - , is_flow_step - , is_skipped - , language - , email - , visible_to_owner - , mem_peak - , tag - , priority + , status + , worker ) - SELECT workspace_id - , id - , parent_job - , created_by - , created_at - , now() + SELECT q.workspace_id + , q.id , 0 - , false - , script_hash - , script_path - , args , $4 - , raw_code - , raw_lock - , true , $1 - , canceled_reason - , job_kind - , schedule_path - , permissioned_as - , flow_status - , raw_flow - , is_flow_step - , false - , language - , email - , visible_to_owner - , mem_peak - , tag - , priority FROM queue - WHERE id = any($2) AND running = false AND parent_job IS NULL AND workspace_id = $3 AND schedule_path IS NULL FOR UPDATE SKIP LOCKED + , 'cancel all' + , (SELECT flow_status FROM v2_job_flow_runtime WHERE id = q.id) + , 'canceled'::job_status + , worker + FROM v2_job_queue q + JOIN v2_job USING (id) + WHERE q.id = any($2) AND running = false AND parent_job IS NULL AND q.workspace_id = $3 AND trigger IS NULL + FOR UPDATE SKIP LOCKED ON CONFLICT (id) DO NOTHING RETURNING id AS \"id!\"", username, &jobs, w_id, serde_json::json!({"error": { "message": format!("Job canceled: cancel all by {username}"), "name": "Canceled", "reason": "cancel all", "canceler": username}})) .fetch_all(&mut *tx) .await?.into_iter().map(|x| x.id).collect::>(); sqlx::query!( - "DELETE FROM queue WHERE id = any($1) AND workspace_id = $2", + "DELETE FROM v2_job_queue WHERE id = any($1) AND workspace_id = $2", &trivial_jobs, w_id ) @@ -1621,7 +1582,7 @@ async fn cancel_selection( let mut tx = user_db.begin(&authed).await?; let tags = get_scope_tags(&authed).map(|v| v.iter().map(|s| s.to_string()).collect_vec()); 
let jobs_to_cancel = sqlx::query_scalar!( - "SELECT id AS \"id!\" FROM queue WHERE id = ANY($1) AND schedule_path IS NULL AND ($2::text[] IS NULL OR tag = ANY($2))", + "SELECT id AS \"id!\" FROM v2_as_queue WHERE id = ANY($1) AND schedule_path IS NULL AND ($2::text[] IS NULL OR tag = ANY($2))", &jobs, tags.as_ref().map(|v| v.as_slice()) ) @@ -1641,7 +1602,9 @@ async fn list_filtered_uuids( ) -> error::JsonResult> { require_admin(authed.is_admin, &authed.username)?; - let mut sqlb = SqlBuilder::select_from("queue").fields(&["id"]).clone(); + let mut sqlb = SqlBuilder::select_from("v2_as_queue") + .fields(&["id"]) + .clone(); sqlb = join_concurrency_key(lq.concurrency_key.as_ref(), sqlb); @@ -1678,7 +1641,7 @@ async fn count_queue_jobs( Ok(Json( sqlx::query_as!( QueueStats, - "SELECT coalesce(COUNT(*) FILTER(WHERE suspend = 0 AND running = false), 0) as \"database_length!\", coalesce(COUNT(*) FILTER(WHERE suspend > 0), 0) as \"suspended!\" FROM queue WHERE (workspace_id = $1 OR $2) AND scheduled_for <= now()", + "SELECT coalesce(COUNT(*) FILTER(WHERE suspend = 0 AND running = false), 0) as \"database_length!\", coalesce(COUNT(*) FILTER(WHERE suspend > 0), 0) as \"suspended!\" FROM v2_as_queue WHERE (workspace_id = $1 OR $2) AND scheduled_for <= now()", w_id, w_id == "admins" && cq.all_workspaces.unwrap_or(false), ) @@ -1700,7 +1663,7 @@ async fn count_completed_jobs_detail( Path(w_id): Path, Query(query): Query, ) -> error::JsonResult { - let mut sqlb = SqlBuilder::select_from("completed_job"); + let mut sqlb = SqlBuilder::select_from("v2_as_completed_job"); sqlb.field("COUNT(*) as count"); if !query.all_workspaces.unwrap_or(false) { @@ -1742,7 +1705,7 @@ async fn count_completed_jobs( Ok(Json( sqlx::query_as!( QueueStats, - "SELECT coalesce(COUNT(*), 0) as \"database_length!\", null::bigint as suspended FROM completed_job WHERE workspace_id = $1", + "SELECT coalesce(COUNT(*), 0) as \"database_length!\", null::bigint as suspended FROM v2_job_completed WHERE 
workspace_id = $1", w_id ) .fetch_one(&db) @@ -1956,7 +1919,7 @@ async fn resume_suspended_job_internal( if !approved { sqlx::query!( - "UPDATE queue SET suspend = 0 WHERE id = $1", + "UPDATE v2_job_queue SET suspend = 0 WHERE id = $1", parent_flow_info.id ) .execute(&mut *tx) @@ -2028,7 +1991,7 @@ async fn resume_immediately_if_relevant<'c>( if matches!(status.current_step(), Some(FlowStatusModule::WaitingForEvents { job, .. }) if job == &job_id) { sqlx::query!( - "UPDATE queue SET suspend = $1 WHERE id = $2", + "UPDATE v2_job_queue SET suspend = $1 WHERE id = $2", suspend, flow.id, ) @@ -2079,9 +2042,11 @@ async fn get_suspended_parent_flow_info(job_id: Uuid, db: &DB) -> error::Result< let flow = sqlx::query_as!( FlowInfo, r#" - SELECT id AS "id!", flow_status, suspend AS "suspend!", script_path - FROM queue - WHERE id = ( SELECT parent_job FROM queue WHERE id = $1 UNION ALL SELECT parent_job FROM completed_job WHERE id = $1) + SELECT q.id, f.flow_status, q.suspend, j.runnable_path AS script_path + FROM v2_job_queue q + JOIN v2_job j USING (id) + JOIN v2_job_flow_runtime f USING (id) + WHERE id = ( SELECT parent_job FROM v2_job WHERE id = $1 ) FOR UPDATE "#, job_id, @@ -2100,7 +2065,7 @@ async fn get_suspended_flow_info<'c>( FlowInfo, r#" SELECT id AS "id!", flow_status, suspend AS "suspend!", script_path - FROM queue + FROM v2_as_queue WHERE id = $1 "#, job_id, @@ -2159,11 +2124,7 @@ pub async fn get_suspended_job_flow( let flow_id = sqlx::query_scalar!( r#" SELECT parent_job - FROM queue - WHERE id = $1 AND workspace_id = $2 - UNION ALL - SELECT parent_job - FROM completed_job + FROM v2_job WHERE id = $1 AND workspace_id = $2 "#, job, @@ -2269,8 +2230,10 @@ fn conditionally_require_authed_user( return Ok(()); } } - let error_msg = format!("Only users from one of the following groups are allowed to approve this workflow: {}", - approval_conditions.user_groups_required.join(", ")); + let error_msg = format!( + "Only users from one of the following groups are 
allowed to approve this workflow: {}", + approval_conditions.user_groups_required.join(", ") + ); return Err(Error::PermissionDenied(error_msg)); } } @@ -2300,7 +2263,7 @@ pub async fn get_flow_user_state( let r = sqlx::query_scalar!( r#" SELECT flow_status->'user_states'->$1 - FROM queue + FROM v2_as_queue WHERE id = $2 AND workspace_id = $3 "#, key, @@ -2322,8 +2285,9 @@ pub async fn set_flow_user_state( let mut tx = user_db.begin(&authed).await?; let r = sqlx::query_scalar!( r#" - UPDATE queue SET flow_status = JSONB_SET(flow_status, ARRAY['user_states'], JSONB_SET(COALESCE(flow_status->'user_states', '{}'::jsonb), ARRAY[$1], $2)) - WHERE id = $3 AND workspace_id = $4 AND job_kind IN ('flow', 'flowpreview', 'flownode') RETURNING 1 + UPDATE v2_job_flow_runtime f SET flow_status = JSONB_SET(flow_status, ARRAY['user_states'], JSONB_SET(COALESCE(flow_status->'user_states', '{}'::jsonb), ARRAY[$1], $2)) + FROM v2_job j + WHERE f.id = $3 AND f.id = j.id AND j.workspace_id = $4 AND kind IN ('flow', 'flowpreview', 'flownode') RETURNING 1 "#, key, value, @@ -2761,7 +2725,7 @@ impl<'a> From for Job { parent_job: uj.parent_job, created_by: uj.created_by, created_at: uj.created_at, - started_at: uj.started_at.unwrap_or(uj.created_at), + started_at: uj.started_at, duration_ms: uj.duration_ms.unwrap(), success: uj.success.unwrap(), script_hash: uj.script_hash, @@ -3137,7 +3101,7 @@ pub async fn restart_flow( "SELECT script_path, args AS \"args: sqlx::types::Json>>\", tag AS \"tag!\", priority - FROM completed_job + FROM v2_as_completed_job WHERE id = $1 and workspace_id = $2", job_id, &w_id, @@ -3437,10 +3401,9 @@ pub async fn run_workflow_as_code( if !wkflow_query.skip_update.unwrap_or(false) { sqlx::query!( - "UPDATE queue SET flow_status = jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], jsonb_set(jsonb_set('{}'::jsonb, '{scheduled_for}', to_jsonb(now()::text)), '{name}', to_jsonb($4::text))) WHERE id = $2 AND workspace_id = $3", + "UPDATE v2_job_flow_runtime SET 
flow_status = jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], jsonb_set(jsonb_set('{}'::jsonb, '{scheduled_for}', to_jsonb(now()::text)), '{name}', to_jsonb($3::text))) WHERE id = $2", uuid.to_string(), job_id, - w_id, entrypoint ).execute(&mut *tx).await?; } else { @@ -3574,7 +3537,7 @@ pub async fn run_wait_result( language AS \"language: ScriptLang\", flow_status AS \"flow_status: sqlx::types::Json>\", success AS \"success!\" - FROM completed_job + FROM v2_as_completed_job WHERE id = $1 AND workspace_id = $2", uuid, &w_id @@ -3687,17 +3650,19 @@ pub async fn run_wait_result( async fn delete_job_metadata_after_use(db: &DB, job_uuid: Uuid) -> Result<(), Error> { sqlx::query!( - "UPDATE completed_job - SET logs = '##DELETED##', args = '{}'::jsonb, result = '{}'::jsonb - WHERE id = $1", + "UPDATE v2_job SET args = '{}'::jsonb WHERE id = $1", + job_uuid, + ) + .execute(db) + .await?; + sqlx::query!( + "UPDATE v2_job_completed SET result = '{}'::jsonb WHERE id = $1", job_uuid, ) .execute(db) .await?; sqlx::query!( - "UPDATE job_logs - SET logs = '##DELETED##' - WHERE job_id = $1", + "UPDATE job_logs SET logs = '##DELETED##' WHERE job_id = $1", job_uuid, ) .execute(db) @@ -3708,7 +3673,7 @@ async fn delete_job_metadata_after_use(db: &DB, job_uuid: Uuid) -> Result<(), Er pub async fn check_queue_too_long(db: &DB, queue_limit: Option) -> error::Result<()> { if let Some(limit) = queue_limit { let count = sqlx::query_scalar!( - "SELECT COUNT(*) FROM queue WHERE canceled = false AND (scheduled_for <= now() + "SELECT COUNT(*) FROM v2_as_queue WHERE canceled = false AND (scheduled_for <= now() OR (suspend_until IS NOT NULL AND ( suspend <= 0 OR suspend_until <= now())))", @@ -4818,48 +4783,68 @@ async fn add_batch_jobs( let uuids = sqlx::query_scalar!( r#"WITH uuid_table as ( - select gen_random_uuid() as uuid from generate_series(1, $6) + select gen_random_uuid() as uuid from generate_series(1, $16) ) - INSERT INTO job - (id, workspace_id, raw_code, raw_lock, 
raw_flow, tag) - (SELECT uuid, $1, $2, $3, $4, $5 FROM uuid_table) + INSERT INTO v2_job + (id, workspace_id, raw_code, raw_lock, raw_flow, tag, runnable_id, runnable_path, kind, + script_lang, created_by, permissioned_as, permissioned_as_email, concurrent_limit, + concurrency_time_window_s, timeout, args) + (SELECT uuid, $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, + ('{ "uuid": "' || uuid || '" }')::jsonb FROM uuid_table) RETURNING id AS "id!""#, w_id, raw_code, raw_lock, raw_flow.map(sqlx::types::Json) as Option>, tag, - n + hash.map(|h| h.0), + path, + job_kind.clone() as JobKind, + language as ScriptLang, + authed.username, + username_to_permissioned_as(&authed.username), + authed.email, + concurrent_limit, + concurrent_time_window_s, + timeout, + n, ) .fetch_all(&mut *tx) .await?; let uuids = sqlx::query_scalar!( r#"WITH uuid_table as ( - select unnest($11::uuid[]) as uuid + select unnest($4::uuid[]) as uuid ) - INSERT INTO queue - (id, script_hash, script_path, job_kind, language, args, tag, created_by, permissioned_as, email, scheduled_for, workspace_id, concurrent_limit, concurrency_time_window_s, timeout, flow_status) - (SELECT uuid, $1, $2, $3, $4, ('{ "uuid": "' || uuid || '" }')::jsonb, $5, $6, $7, $8, $9, $10, $12, $13, $14, $15 FROM uuid_table) - RETURNING id AS "id!""#, - hash.map(|h| h.0), - path, - job_kind.clone() as JobKind, - language as ScriptLang, - tag, - authed.username, - username_to_permissioned_as(&authed.username), - authed.email, - Utc::now(), - w_id, + INSERT INTO v2_job_queue + (id, workspace_id, scheduled_for, tag) + (SELECT uuid, $1, $2, $3 FROM uuid_table) + RETURNING id"#, + w_id, + Utc::now(), + tag, + &uuids + ) + .fetch_all(&mut *tx) + .await?; + + sqlx::query!( + "INSERT INTO v2_job_runtime (id) SELECT unnest($1::uuid[])", + &uuids, + ) + .execute(&mut *tx) + .await?; + + if let Some(flow_status) = flow_status { + sqlx::query!( + "INSERT INTO v2_job_flow_runtime (id, flow_status) + SELECT 
unnest($1::uuid[]), $2", &uuids, - concurrent_limit, - concurrent_time_window_s, - timeout, - flow_status.map(sqlx::types::Json) as Option> + sqlx::types::Json(flow_status) as sqlx::types::Json ) - .fetch_all(&mut *tx) + .execute(&mut *tx) .await?; + } if let Some(custom_concurrency_key) = custom_concurrency_key { sqlx::query!( @@ -5139,14 +5124,14 @@ async fn get_job_update( let record = sqlx::query!( "SELECT running AS \"running!\", - substr(concat(coalesce(queue.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs, + substr(concat(coalesce(v2_as_queue.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs, mem_peak, CASE WHEN is_flow_step is true then NULL else flow_status END AS \"flow_status: sqlx::types::Json>\", job_logs.log_offset + char_length(job_logs.logs) + 1 AS log_offset, created_by AS \"created_by!\" - FROM queue - LEFT JOIN job_logs ON job_logs.job_id = queue.id - WHERE queue.workspace_id = $2 AND queue.id = $3", + FROM v2_as_queue + LEFT JOIN job_logs ON job_logs.job_id = v2_as_queue.id + WHERE v2_as_queue.workspace_id = $2 AND v2_as_queue.id = $3", log_offset, &w_id, job_id @@ -5192,14 +5177,14 @@ async fn get_job_update( } else { let record = sqlx::query!( "SELECT - substr(concat(coalesce(completed_job.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs, + substr(concat(coalesce(v2_as_completed_job.logs, ''), job_logs.logs), greatest($1 - job_logs.log_offset, 0)) AS logs, mem_peak, CASE WHEN is_flow_step is true then NULL else flow_status END AS \"flow_status: sqlx::types::Json>\", job_logs.log_offset + char_length(job_logs.logs) + 1 AS log_offset, created_by AS \"created_by!\" - FROM completed_job - LEFT JOIN job_logs ON job_logs.job_id = completed_job.id - WHERE completed_job.workspace_id = $2 AND completed_job.id = $3", + FROM v2_as_completed_job + LEFT JOIN job_logs ON job_logs.job_id = v2_as_completed_job.id + WHERE v2_as_completed_job.workspace_id = $2 AND v2_as_completed_job.id 
= $3", log_offset, &w_id, job_id @@ -5346,7 +5331,7 @@ pub fn list_completed_jobs_query( join_outstanding_wait_times: bool, tags: Option>, ) -> SqlBuilder { - let mut sqlb = SqlBuilder::select_from("completed_job") + let mut sqlb = SqlBuilder::select_from("v2_as_completed_job") .fields(fields) .order_by("created_at", lq.order_desc.unwrap_or(true)) .offset(offset) @@ -5512,7 +5497,7 @@ async fn get_completed_job_result( flow_status AS \"flow_status: sqlx::types::Json>\", language AS \"language: ScriptLang\", created_by AS \"created_by!\" - FROM completed_job + FROM v2_as_completed_job WHERE id = $1 AND workspace_id = $2 AND ($4::text[] IS NULL OR tag = ANY($4))", id, &w_id, @@ -5529,7 +5514,7 @@ async fn get_completed_job_result( flow_status AS \"flow_status: sqlx::types::Json>\", language AS \"language: ScriptLang\", created_by AS \"created_by!\" - FROM completed_job + FROM v2_as_completed_job WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", id, &w_id, @@ -5547,7 +5532,7 @@ async fn get_completed_job_result( let mut parent_job = id; while parent_job != suspended_job { let p_job = sqlx::query_scalar!( - "SELECT parent_job FROM queue WHERE id = $1 AND workspace_id = $2", + "SELECT parent_job FROM v2_job WHERE id = $1 AND workspace_id = $2", parent_job, &w_id ) @@ -5614,7 +5599,7 @@ async fn count_by_tag( TagCount, r#" SELECT tag as "tag!", COUNT(*) as "count!" - FROM completed_job + FROM v2_as_completed_job WHERE started_at > NOW() - make_interval(secs => $1) AND ($2::text IS NULL OR workspace_id = $2) GROUP BY tag ORDER BY "count!" 
DESC @@ -5657,7 +5642,7 @@ async fn get_completed_job_result_maybe( language AS \"language: ScriptLang\", flow_status AS \"flow_status: sqlx::types::Json>\", created_by AS \"created_by!\" - FROM completed_job + FROM v2_as_completed_job WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3))", id, &w_id, @@ -5689,7 +5674,7 @@ async fn get_completed_job_result_maybe( .into_response()) } else if get_started.is_some_and(|x| x) { let started = sqlx::query_scalar!( - "SELECT running AS \"running!\" FROM queue WHERE id = $1 AND workspace_id = $2", + "SELECT running AS \"running!\" FROM v2_job_queue WHERE id = $1 AND workspace_id = $2", id, w_id ) @@ -5726,8 +5711,17 @@ async fn delete_completed_job<'a>( require_admin(authed.is_admin, &authed.username)?; let tags = get_scope_tags(&authed); let job_o = sqlx::query_as::<_, CompletedJob>( - "UPDATE completed_job SET args = null, logs = '', result = null, deleted = true WHERE id = $1 AND workspace_id = $2 AND ($3::text[] IS NULL OR tag = ANY($3)) \ - RETURNING *, null as labels", + "WITH mark_as_deleted AS ( + UPDATE v2_job_completed c SET + result = NULL, + deleted = TRUE + FROM v2_job j + WHERE c.id = $1 + AND j.id = c.id + AND c.workspace_id = $2 + AND ($3::TEXT[] IS NULL OR tag = ANY($3)) + RETURNING c.id + ) SELECT * FROM v2_as_completed_job WHERE id = (SELECT id FROM mark_as_deleted)", ) .bind(id) .bind(&w_id) @@ -5737,6 +5731,9 @@ async fn delete_completed_job<'a>( let cj = not_found_if_none(job_o, "Completed Job", id.to_string())?; + sqlx::query!("UPDATE v2_job SET args = NULL WHERE id = $1", id) + .execute(&mut *tx) + .await?; sqlx::query!("DELETE FROM job_logs WHERE job_id = $1", id) .execute(&mut *tx) .await?; diff --git a/backend/windmill-api/src/resources.rs b/backend/windmill-api/src/resources.rs index 8848ed866feb1..c41788337bed4 100644 --- a/backend/windmill-api/src/resources.rs +++ b/backend/windmill-api/src/resources.rs @@ -544,7 +544,7 @@ pub async fn transform_json_value<'c>( 
parent_job, permissioned_as AS \"permissioned_as!\", script_path, schedule_path, flow_step_id, root_job, scheduled_for AS \"scheduled_for!: chrono::DateTime\" - FROM queue WHERE id = $1 AND workspace_id = $2", + FROM v2_as_queue WHERE id = $1 AND workspace_id = $2", job_id, workspace ) @@ -557,7 +557,7 @@ pub async fn transform_json_value<'c>( let flow_path = if let Some(uuid) = job.parent_job { let mut tx: Transaction<'_, Postgres> = authed_transaction_or_default(authed, user_db.clone(), db).await?; - let p = sqlx::query_scalar!("SELECT script_path FROM queue WHERE id = $1", uuid) + let p = sqlx::query_scalar!("SELECT runnable_path FROM v2_job WHERE id = $1", uuid) .fetch_optional(&mut *tx) .await? .flatten(); diff --git a/backend/windmill-api/src/schedule.rs b/backend/windmill-api/src/schedule.rs index 79e8c4dcb49db..ff7a625272cca 100644 --- a/backend/windmill-api/src/schedule.rs +++ b/backend/windmill-api/src/schedule.rs @@ -407,8 +407,8 @@ async fn list_schedule_with_jobs( let mut tx = user_db.begin(&authed).await?; let (per_page, offset) = paginate(pagination); let rows = sqlx::query_as!(ScheduleWJobs, - "SELECT schedule.*, t.jobs FROM schedule, LATERAL ( SELECT ARRAY (SELECT json_build_object('id', id, 'success', success, 'duration_ms', duration_ms) FROM completed_job WHERE - completed_job.schedule_path = schedule.path AND completed_job.workspace_id = $1 AND parent_job IS NULL AND is_skipped = False ORDER BY started_at DESC LIMIT 20) AS jobs ) t + "SELECT schedule.*, t.jobs FROM schedule, LATERAL ( SELECT ARRAY (SELECT json_build_object('id', id, 'success', success, 'duration_ms', duration_ms) FROM v2_as_completed_job WHERE + v2_as_completed_job.schedule_path = schedule.path AND v2_as_completed_job.workspace_id = $1 AND parent_job IS NULL AND is_skipped = False ORDER BY started_at DESC LIMIT 20) AS jobs ) t WHERE schedule.workspace_id = $1 ORDER BY schedule.edited_at desc LIMIT $2 OFFSET $3", w_id, per_page as i64, @@ -832,17 +832,30 @@ pub struct 
EditSchedule { } pub async fn clear_schedule<'c>( - db: &mut Transaction<'c, Postgres>, + tx: &mut Transaction<'c, Postgres>, path: &str, w_id: &str, ) -> Result<()> { tracing::info!("Clearing schedule {}", path); sqlx::query!( - "DELETE FROM queue WHERE schedule_path = $1 AND running = false AND workspace_id = $2 AND is_flow_step = false", + "WITH to_delete AS ( + SELECT id FROM v2_job_queue + JOIN v2_job j USING (id) + WHERE trigger_kind = 'schedule' + AND trigger = $1 + AND j.workspace_id = $2 + AND flow_step_id IS NULL + AND running = false + FOR UPDATE + ), deleted AS ( + DELETE FROM v2_job_queue + WHERE id IN (SELECT id FROM to_delete) + RETURNING id + ) DELETE FROM v2_job WHERE id IN (SELECT id FROM deleted)", path, w_id ) - .execute(&mut **db) + .execute(&mut **tx) .await?; Ok(()) } diff --git a/backend/windmill-api/src/slack_approvals.rs b/backend/windmill-api/src/slack_approvals.rs index 64b536949d8ab..035c49709d17c 100644 --- a/backend/windmill-api/src/slack_approvals.rs +++ b/backend/windmill-api/src/slack_approvals.rs @@ -977,17 +977,17 @@ async fn get_modal_blocks( let (job_kind, script_hash, raw_flow, parent_job_id, created_at, created_by, script_path, args) = sqlx::query!( "SELECT - queue.job_kind AS \"job_kind!: JobKind\", - queue.script_hash AS \"script_hash: ScriptHash\", - queue.raw_flow AS \"raw_flow: sqlx::types::Json>\", - completed_job.parent_job AS \"parent_job: Uuid\", - completed_job.created_at AS \"created_at!: chrono::NaiveDateTime\", - completed_job.created_by AS \"created_by!\", - queue.script_path, - queue.args AS \"args: sqlx::types::Json>\" - FROM queue - JOIN completed_job ON completed_job.parent_job = queue.id - WHERE completed_job.id = $1 AND completed_job.workspace_id = $2 + v2_as_queue.job_kind AS \"job_kind!: JobKind\", + v2_as_queue.script_hash AS \"script_hash: ScriptHash\", + v2_as_queue.raw_flow AS \"raw_flow: sqlx::types::Json>\", + v2_as_completed_job.parent_job AS \"parent_job: Uuid\", + v2_as_completed_job.created_at 
AS \"created_at!: chrono::NaiveDateTime\", + v2_as_completed_job.created_by AS \"created_by!\", + v2_as_queue.script_path, + v2_as_queue.args AS \"args: sqlx::types::Json>\" + FROM v2_as_queue + JOIN v2_as_completed_job ON v2_as_completed_job.parent_job = v2_as_queue.id + WHERE v2_as_completed_job.id = $1 AND v2_as_completed_job.workspace_id = $2 LIMIT 1", job_id, &w_id diff --git a/backend/windmill-api/src/users.rs b/backend/windmill-api/src/users.rs index fdb40d97c5c8b..92f8149ce74ac 100644 --- a/backend/windmill-api/src/users.rs +++ b/backend/windmill-api/src/users.rs @@ -471,7 +471,7 @@ async fn list_user_usage( FROM usr , LATERAL ( SELECT COALESCE(SUM(duration_ms + 1000)/1000 , 0)::BIGINT executions - FROM completed_job + FROM v2_as_completed_job WHERE workspace_id = $1 AND job_kind NOT IN ('flow', 'flowpreview', 'flownode') AND email = usr.email @@ -2309,25 +2309,9 @@ async fn update_username_in_workpsace<'c>( .execute(&mut **tx) .await?; - // ---- queue ---- + // ---- v2_job ---- sqlx::query!( - r#"UPDATE queue SET script_path = REGEXP_REPLACE(script_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE script_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, - new_username, - old_username, - w_id - ).execute(&mut **tx) - .await?; - - sqlx::query!( - r#"UPDATE queue SET schedule_path = REGEXP_REPLACE(schedule_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE schedule_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, - new_username, - old_username, - w_id - ).execute(&mut **tx) - .await?; - - sqlx::query!( - "UPDATE queue SET permissioned_as = ('u/' || $1) WHERE permissioned_as = ('u/' || $2) AND workspace_id = $3", + r#"UPDATE v2_job SET runnable_path = REGEXP_REPLACE(runnable_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE runnable_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, new_username, old_username, w_id @@ -2336,44 +2320,25 @@ async fn update_username_in_workpsace<'c>( .await?; sqlx::query!( - "UPDATE queue SET 
canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", + r#"UPDATE v2_job SET trigger = REGEXP_REPLACE(trigger,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE trigger LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, new_username, old_username, w_id ) .execute(&mut **tx) - .await - .unwrap(); + .await?; sqlx::query!( - "UPDATE queue SET created_by = $1 WHERE created_by = $2 AND workspace_id = $3", + "UPDATE v2_job SET permissioned_as = ('u/' || $1) WHERE permissioned_as = ('u/' || $2) AND workspace_id = $3", new_username, old_username, w_id ) .execute(&mut **tx) - .await - .unwrap(); - - // ---- completed_job ---- - sqlx::query!( - r#"UPDATE completed_job SET script_path = REGEXP_REPLACE(script_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE script_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, - new_username, - old_username, - w_id - ).execute(&mut **tx) .await?; sqlx::query!( - r#"UPDATE completed_job SET schedule_path = REGEXP_REPLACE(schedule_path,'u/' || $2 || '/(.*)','u/' || $1 || '/\1') WHERE schedule_path LIKE ('u/' || $2 || '/%') AND workspace_id = $3"#, - new_username, - old_username, - w_id - ).execute(&mut **tx) - .await?; - - sqlx::query!( - "UPDATE completed_job SET permissioned_as = ('u/' || $1) WHERE permissioned_as = ('u/' || $2) AND workspace_id = $3", + "UPDATE v2_job SET created_by = $1 WHERE created_by = $2 AND workspace_id = $3", new_username, old_username, w_id @@ -2381,25 +2346,25 @@ async fn update_username_in_workpsace<'c>( .execute(&mut **tx) .await?; + // ---- v2_job_queue ---- sqlx::query!( - "UPDATE completed_job SET created_by = $1 WHERE created_by = $2 AND workspace_id = $3", + "UPDATE v2_job_queue SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", new_username, old_username, w_id ) .execute(&mut **tx) - .await - .unwrap(); + .await?; + // ---- v2_job_completed ---- sqlx::query!( - "UPDATE completed_job SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", + "UPDATE 
v2_job_completed SET canceled_by = $1 WHERE canceled_by = $2 AND workspace_id = $3", new_username, old_username, w_id ) .execute(&mut **tx) - .await - .unwrap(); + .await?; // ---- resources---- sqlx::query!( diff --git a/backend/windmill-api/src/websocket_triggers.rs b/backend/windmill-api/src/websocket_triggers.rs index f5a2884214406..a5e86e3c38c56 100644 --- a/backend/windmill-api/src/websocket_triggers.rs +++ b/backend/windmill-api/src/websocket_triggers.rs @@ -560,7 +560,7 @@ async fn wait_runnable_result( let result = sqlx::query!( "SELECT result AS \"result: SqlxJson>\", success AS \"success!\" - FROM completed_job WHERE id = $1 AND workspace_id = $2", + FROM v2_as_completed_job WHERE id = $1 AND workspace_id = $2", Uuid::parse_str(&job_id)?, workspace_id ) diff --git a/backend/windmill-api/src/workspaces_extra.rs b/backend/windmill-api/src/workspaces_extra.rs index a8f0c8721dcb5..2378e85c0271d 100644 --- a/backend/windmill-api/src/workspaces_extra.rs +++ b/backend/windmill-api/src/workspaces_extra.rs @@ -144,7 +144,7 @@ pub(crate) async fn change_workspace_id( .await?; sqlx::query!( - "UPDATE completed_job SET workspace_id = $1 WHERE workspace_id = $2", + "UPDATE v2_job_completed SET workspace_id = $1 WHERE workspace_id = $2", &rw.new_id, &old_id ) @@ -269,7 +269,7 @@ pub(crate) async fn change_workspace_id( .await?; sqlx::query!( - "UPDATE queue SET workspace_id = $1 WHERE workspace_id = $2", + "UPDATE v2_job_queue SET workspace_id = $1 WHERE workspace_id = $2", &rw.new_id, &old_id ) @@ -277,7 +277,7 @@ pub(crate) async fn change_workspace_id( .await?; sqlx::query!( - "UPDATE job SET workspace_id = $1 WHERE workspace_id = $2", + "UPDATE v2_job SET workspace_id = $1 WHERE workspace_id = $2", &rw.new_id, &old_id ) @@ -430,7 +430,10 @@ pub(crate) async fn delete_workspace( sqlx::query!("DELETE FROM dependency_map WHERE workspace_id = $1", &w_id) .execute(&mut *tx) .await?; - sqlx::query!("DELETE FROM queue WHERE workspace_id = $1", &w_id) + 
sqlx::query!("DELETE FROM v2_job_queue WHERE workspace_id = $1", &w_id) + .execute(&mut *tx) + .await?; + sqlx::query!("DELETE FROM v2_job WHERE workspace_id = $1", &w_id) .execute(&mut *tx) .await?; sqlx::query!("DELETE FROM capture WHERE workspace_id = $1", &w_id) @@ -468,9 +471,12 @@ pub(crate) async fn delete_workspace( .execute(&mut *tx) .await?; - sqlx::query!("DELETE FROM completed_job WHERE workspace_id = $1", &w_id) - .execute(&mut *tx) - .await?; + sqlx::query!( + "DELETE FROM v2_job_completed WHERE workspace_id = $1", + &w_id + ) + .execute(&mut *tx) + .await?; sqlx::query!("DELETE FROM job_stats WHERE workspace_id = $1", &w_id) .execute(&mut *tx) diff --git a/backend/windmill-common/src/bench.rs b/backend/windmill-common/src/bench.rs index 0e89af571c2f2..649a202ddb685 100644 --- a/backend/windmill-common/src/bench.rs +++ b/backend/windmill-common/src/bench.rs @@ -84,6 +84,7 @@ pub async fn benchmark_init(benchmark_jobs: i32, db: &DB) { let benchmark_kind = std::env::var("BENCHMARK_KIND").unwrap_or("noop".to_string()); if benchmark_jobs > 0 { + let mut tx = db.begin().await.unwrap(); match benchmark_kind.as_str() { "dedicated" => { // you need to create the script first, check https://github.com/windmill-labs/windmill/blob/b76a92cfe454c686f005c65f534e29e039f3c706/benchmarks/lib.ts#L47 @@ -92,10 +93,10 @@ pub async fn benchmark_init(benchmark_jobs: i32, db: &DB) { "f/benchmarks/dedicated", "admins" ) - .fetch_one(db) + .fetch_one(&mut *tx) .await .unwrap_or_else(|_e| panic!("failed to insert dedicated jobs")); - sqlx::query!("INSERT INTO queue (id, script_hash, script_path, job_kind, language, tag, created_by, permissioned_as, email, scheduled_for, workspace_id) (SELECT gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 FROM generate_series(1, $11))", + let uuids = sqlx::query_scalar!("INSERT INTO v2_job (id, runnable_id, runnable_path, kind, script_lang, tag, created_by, permissioned_as, permissioned_as_email, workspace_id) (SELECT 
gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9 FROM generate_series(1, $10)) RETURNING id", hash, "f/benchmarks/dedicated", JobKind::Script as JobKind, @@ -104,12 +105,21 @@ pub async fn benchmark_init(benchmark_jobs: i32, db: &DB) { "admin", "u/admin", "admin@windmill.dev", - chrono::Utc::now(), "admins", benchmark_jobs ) - .execute(db) - .await.unwrap_or_else(|_e| panic!("failed to insert dedicated jobs")); + .fetch_all(&mut *tx) + .await.unwrap_or_else(|_e| panic!("failed to insert dedicated jobs (1)")); + sqlx::query!("INSERT INTO v2_job_queue (id, workspace_id, scheduled_for, tag) SELECT unnest($1::uuid[]), $2, now(), $3", &uuids, "admins", "admins:f/benchmarks/dedicated") + .execute(&mut *tx) + .await.unwrap_or_else(|_e| panic!("failed to insert dedicated jobs (2)")); + sqlx::query!( + "INSERT INTO v2_job_runtime (id) SELECT unnest($1::uuid[])", + &uuids + ) + .execute(&mut *tx) + .await + .unwrap_or_else(|_e| panic!("failed to insert dedicated jobs (3)")); } "parallelflow" => { //create dedicated script @@ -124,9 +134,9 @@ pub async fn benchmark_init(benchmark_jobs: i32, db: &DB) { "flow", "admin", ) - .execute(db) + .execute(&mut *tx) .await.unwrap_or_else(|_e| panic!("failed to insert parallelflow jobs {_e:#}")); - sqlx::query!("INSERT INTO queue (id, script_hash, script_path, job_kind, language, tag, created_by, permissioned_as, email, scheduled_for, workspace_id, raw_flow, flow_status) (SELECT gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12 FROM generate_series(1, 1))", + let uuids = sqlx::query_scalar!("INSERT INTO v2_job (id, runnable_id, runnable_path, kind, script_lang, tag, created_by, permissioned_as, permissioned_as_email, workspace_id, raw_flow) (SELECT gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 FROM generate_series(1, 1)) RETURNING id", None::, None::, JobKind::FlowPreview as JobKind, @@ -135,7 +145,6 @@ pub async fn benchmark_init(benchmark_jobs: i32, db: &DB) { "admin", "u/admin", 
"admin@windmill.dev", - chrono::Utc::now(), "admins", serde_json::from_str::(r#" { @@ -169,6 +178,20 @@ pub async fn benchmark_init(benchmark_jobs: i32, db: &DB) { "preprocessor_module": null } "#).unwrap(), + ) + .fetch_all(&mut *tx) + .await.unwrap_or_else(|_e| panic!("failed to insert parallelflow jobs (1)")); + sqlx::query!("INSERT INTO v2_job_queue (id, workspace_id, scheduled_for, tag) SELECT unnest($1::uuid[]), $2, now(), $3", &uuids, "admins", "flow") + .execute(&mut *tx) + .await.unwrap_or_else(|_e| panic!("failed to insert parallelflow jobs (2)")); + sqlx::query!( + "INSERT INTO v2_job_runtime (id) SELECT unnest($1::uuid[])", + &uuids + ) + .execute(&mut *tx) + .await + .unwrap_or_else(|_e| panic!("failed to insert parallelflow jobs (3)")); + sqlx::query!("INSERT INTO v2_job_flow_runtime (id, flow_status) SELECT unnest($1::uuid[]), $2", &uuids, serde_json::from_str::(r#" { "step": 0, @@ -188,11 +211,11 @@ pub async fn benchmark_init(benchmark_jobs: i32, db: &DB) { "#).unwrap() ) - .execute(db) - .await.unwrap_or_else(|_e| panic!("failed to insert parallelflow jobs")); + .execute(&mut *tx) + .await.unwrap_or_else(|_e| panic!("failed to insert parallelflow jobs (4)")); } _ => { - sqlx::query!("INSERT INTO queue (id, script_hash, script_path, job_kind, language, tag, created_by, permissioned_as, email, scheduled_for, workspace_id) (SELECT gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 FROM generate_series(1, $11))", + let uuids = sqlx::query_scalar!("INSERT INTO v2_job (id, runnable_id, runnable_path, kind, script_lang, tag, created_by, permissioned_as, permissioned_as_email, workspace_id) (SELECT gen_random_uuid(), $1, $2, $3, $4, $5, $6, $7, $8, $9 FROM generate_series(1, $10)) RETURNING id", None::, None::, JobKind::Noop as JobKind, @@ -201,13 +224,23 @@ pub async fn benchmark_init(benchmark_jobs: i32, db: &DB) { "admin", "u/admin", "admin@windmill.dev", - chrono::Utc::now(), "admins", benchmark_jobs ) - .execute(db) - 
.await.unwrap_or_else(|_e| panic!("failed to insert noop jobs")); + .fetch_all(&mut *tx) + .await.unwrap_or_else(|_e| panic!("failed to insert noop jobs (1)")); + sqlx::query!("INSERT INTO v2_job_queue (id, workspace_id, scheduled_for, tag) SELECT unnest($1::uuid[]), $2, now(), $3", &uuids, "admins", "deno") + .execute(&mut *tx) + .await.unwrap_or_else(|_e| panic!("failed to insert noop jobs (2)")); + sqlx::query!( + "INSERT INTO v2_job_runtime (id) SELECT unnest($1::uuid[])", + &uuids + ) + .execute(&mut *tx) + .await + .unwrap_or_else(|_e| panic!("failed to insert noop jobs (3)")); } } + tx.commit().await.unwrap(); } } diff --git a/backend/windmill-common/src/cache.rs b/backend/windmill-common/src/cache.rs index 13d71dd4028eb..1efe481aa6b8e 100644 --- a/backend/windmill-common/src/cache.rs +++ b/backend/windmill-common/src/cache.rs @@ -672,7 +672,7 @@ pub mod job { match (raw_lock, raw_code, raw_flow) { (None, None, None) => sqlx::query!( "SELECT raw_code, raw_lock, raw_flow AS \"raw_flow: Json>\" \ - FROM job WHERE id = $1 LIMIT 1", + FROM v2_job WHERE id = $1 LIMIT 1", job ) .fetch_optional(e) diff --git a/backend/windmill-common/src/jobs.rs b/backend/windmill-common/src/jobs.rs index 13a8dd7e8ae6d..5a784666275d0 100644 --- a/backend/windmill-common/src/jobs.rs +++ b/backend/windmill-common/src/jobs.rs @@ -197,7 +197,7 @@ pub struct CompletedJob { pub parent_job: Option, pub created_by: String, pub created_at: chrono::DateTime, - pub started_at: chrono::DateTime, + pub started_at: Option>, pub duration_ms: i64, pub success: bool, #[serde(skip_serializing_if = "Option::is_none")] @@ -251,12 +251,6 @@ impl CompletedJob { } } -#[derive(sqlx::FromRow)] -pub struct BranchResults { - pub result: sqlx::types::Json>, - pub id: Uuid, -} - #[derive(Debug, Clone)] pub enum JobPayload { ScriptHub { diff --git a/backend/windmill-common/src/queue.rs b/backend/windmill-common/src/queue.rs index 3b427e9bb44fd..39ec349c9168a 100644 --- a/backend/windmill-common/src/queue.rs +++ 
b/backend/windmill-common/src/queue.rs @@ -4,7 +4,7 @@ use sqlx::{Pool, Postgres}; pub async fn get_queue_counts(db: &Pool) -> HashMap { sqlx::query!( - "SELECT tag AS \"tag!\", count(*) AS \"count!\" FROM queue WHERE + "SELECT tag AS \"tag!\", count(*) AS \"count!\" FROM v2_job_queue WHERE scheduled_for <= now() - ('3 seconds')::interval AND running = false GROUP BY tag", ) diff --git a/backend/windmill-common/src/worker.rs b/backend/windmill-common/src/worker.rs index 36d9f14804808..67c05ef53d118 100644 --- a/backend/windmill-common/src/worker.rs +++ b/backend/windmill-common/src/worker.rs @@ -103,33 +103,65 @@ lazy_static::lazy_static! { pub static ref DISABLE_FLOW_SCRIPT: bool = std::env::var("DISABLE_FLOW_SCRIPT").ok().is_some_and(|x| x == "1" || x == "true"); } +fn format_pull_query(peek: String) -> String { + format!( + "WITH peek AS ( + {} + ), q AS ( + UPDATE v2_job_queue SET + running = true, + started_at = coalesce(started_at, now()), + suspend_until = null + WHERE id = (SELECT id FROM peek) + RETURNING + started_at, scheduled_for, running, + canceled_by, canceled_reason, canceled_by IS NOT NULL AS canceled, + suspend, suspend_until + ), r AS ( + UPDATE v2_job_runtime SET + ping = now() + WHERE id = (SELECT id FROM peek) + RETURNING ping AS last_ping, memory_peak AS mem_peak + ), j AS ( + SELECT + id, workspace_id, parent_job, created_by, created_at, runnable_id AS script_hash, + runnable_path AS script_path, args, kind AS job_kind, + CASE WHEN trigger_kind = 'schedule' THEN trigger END AS schedule_path, + permissioned_as, permissioned_as_email AS email, script_lang AS language, + flow_root_job AS root_job, flow_step_id, flow_step_id IS NOT NULL AS is_flow_step, + same_worker, pre_run_error, visible_to_owner, tag, concurrent_limit, + concurrency_time_window_s, timeout, cache_ttl, priority, raw_code, raw_lock, + raw_flow + FROM v2_job + WHERE id = (SELECT id FROM peek) + ) SELECT id, workspace_id, parent_job, created_by, created_at, started_at, 
scheduled_for, + running, script_hash, script_path, args, null as logs, canceled, canceled_by, + canceled_reason, last_ping, job_kind, schedule_path, permissioned_as, + flow_status, is_flow_step, language, suspend, suspend_until, + same_worker, pre_run_error, email, visible_to_owner, mem_peak, + root_job, leaf_jobs, tag, concurrent_limit, concurrency_time_window_s, + timeout, flow_step_id, cache_ttl, priority, + raw_code, raw_lock, raw_flow + FROM q, r, j + LEFT JOIN v2_job_flow_runtime f USING (id)", + peek + ) +} + pub async fn make_suspended_pull_query(wc: &WorkerConfig) { if wc.worker_tags.len() == 0 { tracing::error!("Empty tags in worker tags, skipping"); return; } - let query = format!( - "UPDATE queue - SET running = true - , started_at = coalesce(started_at, now()) - , last_ping = now() - , suspend_until = null - WHERE id = ( - SELECT id - FROM queue - WHERE suspend_until IS NOT NULL AND (suspend <= 0 OR suspend_until <= now()) AND tag IN ({}) - ORDER BY priority DESC NULLS LAST, created_at - FOR UPDATE SKIP LOCKED - LIMIT 1 - ) - RETURNING id, workspace_id, parent_job, created_by, created_at, started_at, scheduled_for, - running, script_hash, script_path, args, null as logs, canceled, canceled_by, - canceled_reason, last_ping, job_kind, schedule_path, permissioned_as, - flow_status, is_flow_step, language, suspend, suspend_until, - same_worker, pre_run_error, email, visible_to_owner, mem_peak, - root_job, leaf_jobs, tag, concurrent_limit, concurrency_time_window_s, - timeout, flow_step_id, cache_ttl, priority, - raw_code, raw_lock, raw_flow", wc.worker_tags.iter().map(|x| format!("'{x}'")).join(", ")); + let query = format_pull_query(format!( + "SELECT id + FROM v2_job_queue + WHERE suspend_until IS NOT NULL AND (suspend <= 0 OR suspend_until <= now()) AND tag IN ({}) + ORDER BY priority DESC NULLS LAST, created_at + FOR UPDATE SKIP LOCKED + LIMIT 1", + wc.worker_tags.iter().map(|x| format!("'{x}'")).join(", ") + )); let mut l = 
WORKER_SUSPENDED_PULL_QUERY.write().await; *l = query; } @@ -141,31 +173,17 @@ pub async fn make_pull_query(wc: &WorkerConfig) { tracing::error!("Empty tags in priority tags, skipping"); continue; } - let query = format!("UPDATE queue - SET running = true - , started_at = coalesce(started_at, now()) - , last_ping = now() - , suspend_until = null - WHERE id = ( - SELECT id - FROM queue + let query = format_pull_query(format!( + "SELECT id + FROM v2_job_queue WHERE running = false AND tag IN ({}) AND scheduled_for <= now() ORDER BY priority DESC NULLS LAST, scheduled_for FOR UPDATE SKIP LOCKED - LIMIT 1 - ) - RETURNING id, workspace_id, parent_job, created_by, created_at, started_at, scheduled_for, - running, script_hash, script_path, args, null as logs, canceled, canceled_by, - canceled_reason, last_ping, job_kind, schedule_path, permissioned_as, - flow_status, is_flow_step, language, suspend, suspend_until, - same_worker, pre_run_error, email, visible_to_owner, mem_peak, - root_job, leaf_jobs, tag, concurrent_limit, concurrency_time_window_s, - timeout, flow_step_id, cache_ttl, priority, - raw_code, raw_lock, raw_flow", tags.tags.iter().map(|x| format!("'{x}'")).join(", ")); - + LIMIT 1", + tags.tags.iter().map(|x| format!("'{x}'")).join(", ") + )); queries.push(query); } - let mut l = WORKER_PULL_QUERIES.write().await; *l = queries; } diff --git a/backend/windmill-queue/src/jobs.rs b/backend/windmill-queue/src/jobs.rs index 818e7b534d327..86edb86014168 100644 --- a/backend/windmill-queue/src/jobs.rs +++ b/backend/windmill-queue/src/jobs.rs @@ -34,7 +34,8 @@ use windmill_common::{ error::{self, to_anyhow, Error}, flow_status::{ BranchAllStatus, FlowCleanupModule, FlowStatus, FlowStatusModule, FlowStatusModuleWParent, - Iterator, JobResult, RestartedFrom, RetryStatus, MAX_RETRY_ATTEMPTS, MAX_RETRY_INTERVAL, + Iterator as FlowIterator, JobResult, RestartedFrom, RetryStatus, MAX_RETRY_ATTEMPTS, + MAX_RETRY_INTERVAL, }, flows::{ add_virtual_items_if_necessary, 
FlowModule, FlowModuleValue, FlowValue, InputTransform, @@ -49,8 +50,8 @@ use windmill_common::{ utils::{not_found_if_none, report_critical_error, StripPath, WarnAfterExt}, worker::{ to_raw_value, CLOUD_HOSTED, DEFAULT_TAGS_PER_WORKSPACE, DEFAULT_TAGS_WORKSPACES, - DISABLE_FLOW_SCRIPT, MIN_VERSION_IS_AT_LEAST_1_427, MIN_VERSION_IS_AT_LEAST_1_432, - MIN_VERSION_IS_AT_LEAST_1_440, NO_LOGS, WORKER_PULL_QUERIES, WORKER_SUSPENDED_PULL_QUERY, + DISABLE_FLOW_SCRIPT, MIN_VERSION_IS_AT_LEAST_1_432, MIN_VERSION_IS_AT_LEAST_1_440, NO_LOGS, + WORKER_PULL_QUERIES, WORKER_SUSPENDED_PULL_QUERY, }, DB, METRICS_ENABLED, }; @@ -173,7 +174,7 @@ pub async fn cancel_single_job<'c>( }); } else { let id: Option = sqlx::query_scalar!( - "UPDATE queue SET canceled = true, canceled_by = $1, canceled_reason = $2, scheduled_for = now(), suspend = 0 WHERE id = $3 AND workspace_id = $4 AND (canceled = false OR canceled_reason != $2) RETURNING id AS \"id!\"", + "UPDATE v2_job_queue SET canceled_by = $1, canceled_reason = $2, scheduled_for = now(), suspend = 0 WHERE id = $3 AND workspace_id = $4 AND (canceled_by IS NULL OR canceled_reason != $2) RETURNING id", username, reason, job_running.id, @@ -249,7 +250,7 @@ pub async fn cancel_job<'c>( while !jobs.is_empty() { let p_job = jobs.pop(); let new_jobs = sqlx::query_scalar!( - "SELECT id AS \"id!\" FROM queue WHERE parent_job = $1 AND workspace_id = $2", + "SELECT id AS \"id!\" FROM v2_job WHERE parent_job = $1 AND workspace_id = $2", p_job, w_id ) @@ -361,7 +362,7 @@ async fn cancel_persistent_script_jobs_internal<'c>( // we could have retrieved the job IDs in the first query where we retrieve the hashes, but just in case a job was inserted in the queue right in-between the two above query, we re-do the fetch here let jobs_to_cancel = sqlx::query_scalar::<_, Uuid>( - "SELECT id FROM queue WHERE workspace_id = $1 AND script_path = $2 AND canceled = false", + "SELECT id FROM v2_as_queue WHERE workspace_id = $1 AND script_path = $2 AND canceled = 
false", ) .bind(w_id) .bind(script_path) @@ -546,100 +547,40 @@ pub async fn add_completed_job( serde_json::to_string(&result).unwrap_or_else(|_| "".to_string()) ); - let (raw_code, raw_lock, raw_flow) = if !*MIN_VERSION_IS_AT_LEAST_1_427.read().await { - sqlx::query!( - "SELECT raw_code, raw_lock, raw_flow AS \"raw_flow: Json>\" - FROM job WHERE id = $1 AND workspace_id = $2 LIMIT 1", - &job_id, - &queued_job.workspace_id - ) - .fetch_one(db) - .map_ok(|record| (record.raw_code, record.raw_lock, record.raw_flow)) - .or_else(|_| { - sqlx::query!( - "SELECT raw_code, raw_lock, raw_flow AS \"raw_flow: Json>\" - FROM queue WHERE id = $1 AND workspace_id = $2 LIMIT 1", - &job_id, - &queued_job.workspace_id - ) - .fetch_one(db) - .map_ok(|record| (record.raw_code, record.raw_lock, record.raw_flow)) - }) - .await - .unwrap_or_default() - } else { - (None, None, None) - }; - let mem_peak = mem_peak.max(queued_job.mem_peak.unwrap_or(0)); // add_time!(bench, "add_completed_job query START"); let _duration = sqlx::query_scalar!( - "INSERT INTO completed_job AS cj + "INSERT INTO v2_job_completed AS cj ( workspace_id , id - , parent_job - , created_by - , created_at , started_at , duration_ms - , success - , script_hash - , script_path - , args , result - , raw_code - , raw_lock - , canceled , canceled_by , canceled_reason - , job_kind - , schedule_path - , permissioned_as , flow_status - , raw_flow - , is_flow_step - , is_skipped - , language - , email - , visible_to_owner - , mem_peak - , tag - , priority + , memory_peak + , status ) - VALUES ($1, $2, $3, $4, $5, COALESCE($6, now()), COALESCE($30::bigint, (EXTRACT('epoch' FROM (now())) - EXTRACT('epoch' FROM (COALESCE($6, now()))))*1000), $7, $8, $9,\ - $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29) - ON CONFLICT (id) DO UPDATE SET success = $7, result = $11 RETURNING duration_ms AS \"duration_ms!\"", - queued_job.workspace_id, - queued_job.id, - queued_job.parent_job, - 
queued_job.created_by, - queued_job.created_at, - queued_job.started_at, - success, - queued_job.script_hash.map(|x| x.0), - queued_job.script_path, - &queued_job.args as &Option>>>, - result as Json<&T>, - raw_code, - raw_lock, - canceled_by.is_some(), - canceled_by.clone().map(|cb| cb.username).flatten(), - canceled_by.clone().map(|cb| cb.reason).flatten(), - queued_job.job_kind.clone() as JobKind, - queued_job.schedule_path, - queued_job.permissioned_as, - &queued_job.flow_status as &Option>>, - &raw_flow as &Option>>, - queued_job.is_flow_step, - skipped, - queued_job.language.clone() as Option, - queued_job.email, - queued_job.visible_to_owner, - if mem_peak > 0 { Some(mem_peak) } else { None }, - queued_job.tag, - queued_job.priority, - duration, + VALUES ($1, $2, $3, COALESCE($12::bigint, (EXTRACT('epoch' FROM (now())) - EXTRACT('epoch' FROM (COALESCE($3, now()))))*1000), $5, $7, $8, $9,\ + $11, CASE WHEN $6::BOOL THEN 'canceled'::job_status + WHEN $10::BOOL THEN 'skipped'::job_status + WHEN $4::BOOL THEN 'success'::job_status + ELSE 'failure'::job_status END) + ON CONFLICT (id) DO UPDATE SET status = EXCLUDED.status, result = $5 RETURNING duration_ms AS \"duration_ms!\"", + /* $1 */ queued_job.workspace_id, + /* $2 */ queued_job.id, + /* $3 */ queued_job.started_at, + /* $4 */ success, + /* $5 */ result as Json<&T>, + /* $6 */ canceled_by.is_some(), + /* $7 */ canceled_by.clone().map(|cb| cb.username).flatten(), + /* $8 */ canceled_by.clone().map(|cb| cb.reason).flatten(), + /* $9 */ &queued_job.flow_status as &Option>>, + /* $10 */ skipped, + /* $11 */ if mem_peak > 0 { Some(mem_peak) } else { None }, + /* $12 */ duration, ) .fetch_one(&mut *tx) .await @@ -652,7 +593,7 @@ pub async fn add_completed_job( || queued_job.job_kind == JobKind::Preview) { if let Err(e) = sqlx::query!( - "UPDATE completed_job SET flow_status = q.flow_status FROM queue q WHERE completed_job.id = $1 AND q.id = $1 AND q.workspace_id = $2 AND completed_job.workspace_id = $2 AND 
q.flow_status IS NOT NULL", + "UPDATE v2_job_completed SET flow_status = f.flow_status FROM v2_job_flow_runtime f WHERE v2_job_completed.id = $1 AND f.id = $1 AND v2_job_completed.workspace_id = $2", &queued_job.id, &queued_job.workspace_id ) @@ -663,16 +604,25 @@ pub async fn add_completed_job( } if let Some(parent_job) = queued_job.parent_job { if let Err(e) = sqlx::query_scalar!( - "UPDATE queue SET flow_status = jsonb_set(jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], COALESCE(flow_status->$1, '{}'::jsonb)), array[$1, 'duration_ms'], to_jsonb($2::bigint)) WHERE id = $3 AND workspace_id = $4", - &queued_job.id.to_string(), - _duration, - parent_job, - &queued_job.workspace_id - ) - .execute(&mut *tx) - .await { - tracing::error!("Could not update parent job flow_status: {}", e); - } + "UPDATE v2_job_flow_runtime SET + flow_status = jsonb_set( + jsonb_set( + COALESCE(flow_status, '{}'::jsonb), + array[$1], + COALESCE(flow_status->$1, '{}'::jsonb) + ), + array[$1, 'duration_ms'], + to_jsonb($2::bigint) + ) + WHERE id = $3", + &queued_job.id.to_string(), + _duration, + parent_job + ) + .execute(&mut *tx) + .await { + tracing::error!("Could not update parent job flow_status: {}", e); + } } } // tracing::error!("Added completed job {:#?}", queued_job); @@ -689,12 +639,17 @@ pub async fn add_completed_job( parent_job ); sqlx::query!( - "UPDATE queue SET last_ping = now() WHERE id = $1 AND workspace_id = $2 AND canceled = false", - parent_job, - &queued_job.workspace_id - ) - .execute(&mut *tx) - .await?; + "UPDATE v2_job_runtime r SET + ping = now() + FROM v2_job_queue q + WHERE r.id = $1 AND q.id = r.id + AND q.workspace_id = $2 + AND canceled_by IS NULL", + parent_job, + &queued_job.workspace_id + ) + .execute(&mut *tx) + .await?; if flow_is_done { let r = sqlx::query_scalar!( "UPDATE parallel_monitor_lock SET last_ping = now() WHERE parent_flow_id = $1 and job_id = $2 RETURNING 1", @@ -703,9 +658,9 @@ pub async fn add_completed_job( ).fetch_optional(&mut 
*tx).await?; if r.is_some() { tracing::info!( - "parallel flow iteration is done, setting parallel monitor last ping lock for job {}", - &queued_job.id - ); + "parallel flow iteration is done, setting parallel monitor last ping lock for job {}", + &queued_job.id + ); } } } @@ -728,21 +683,21 @@ pub async fn add_completed_job( // otherwise flow rescheduling is done inside handle_flow let schedule_next_tick = !queued_job.is_flow() || !success && sqlx::query_scalar!( - "SELECT - flow_status->>'step' = '0' - AND ( - jsonb_array_length(flow_status->'modules') = 0 - OR flow_status->'modules'->0->>'type' = 'WaitingForPriorSteps' - OR ( - flow_status->'modules'->0->>'type' = 'Failure' - AND flow_status->'modules'->0->>'job' = $1 - ) - ) - FROM completed_job WHERE id = $2 AND workspace_id = $3", - Uuid::nil().to_string(), - &queued_job.id, - &queued_job.workspace_id - ).fetch_optional(&mut *tx).await?.flatten().unwrap_or(false); + "SELECT + flow_status->>'step' = '0' + AND ( + jsonb_array_length(flow_status->'modules') = 0 + OR flow_status->'modules'->0->>'type' = 'WaitingForPriorSteps' + OR ( + flow_status->'modules'->0->>'type' = 'Failure' + AND flow_status->'modules'->0->>'job' = $1 + ) + ) + FROM v2_job_completed WHERE id = $2 AND workspace_id = $3", + Uuid::nil().to_string(), + &queued_job.id, + &queued_job.workspace_id + ).fetch_optional(&mut *tx).await?.flatten().unwrap_or(false); if schedule_next_tick { if let Err(err) = handle_maybe_scheduled_job( @@ -782,7 +737,7 @@ pub async fn add_completed_job( let w_id: &String = &queued_job.workspace_id; if !matches!(err, Error::QuotaExceeded(_)) { report_error_to_workspace_handler_or_critical_side_channel( - &queued_job, + &queued_job, db, format!( "Failed to push schedule error handler job to handle failed job ({base_url}/run/{}?workspace={w_id}): {}", @@ -798,9 +753,9 @@ pub async fn add_completed_job( }; } else { tracing::error!( - "Schedule {schedule_path} in {} not found. 
Impossible to schedule again and apply schedule handlers", - &queued_job.workspace_id - ); + "Schedule {schedule_path} in {} not found. Impossible to schedule again and apply schedule handlers", + &queued_job.workspace_id + ); } } } @@ -818,10 +773,10 @@ pub async fn add_completed_job( } }; if let Err(e) = sqlx::query_scalar!( - "UPDATE concurrency_counter SET job_uuids = job_uuids - $2 WHERE concurrency_id = $1", - concurrency_key, - queued_job.id.hyphenated().to_string(), - ) + "UPDATE concurrency_counter SET job_uuids = job_uuids - $2 WHERE concurrency_id = $1", + concurrency_key, + queued_job.id.hyphenated().to_string(), + ) .execute(&mut *tx) .await { @@ -836,8 +791,8 @@ pub async fn add_completed_job( .await .map_err(|e| { Error::InternalErr(format!( - "Error updating to add ended_at timestamp concurrency_key={concurrency_key}: {e:#}" - )) + "Error updating to add ended_at timestamp concurrency_key={concurrency_key}: {e:#}" + )) }) { tracing::error!("Could not update concurrency_key: {}", e); } @@ -900,14 +855,15 @@ pub async fn add_completed_job( .await .map_err(|e| Error::InternalErr(format!("fetching if {w_id} is premium: {e:#}")))?; let _ = sqlx::query!( - "INSERT INTO usage (id, is_workspace, month_, usage) - VALUES ($1, TRUE, EXTRACT(YEAR FROM current_date) * 12 + EXTRACT(MONTH FROM current_date), $2) - ON CONFLICT (id, is_workspace, month_) DO UPDATE SET usage = usage.usage + $2", - w_id, - additional_usage as i32) - .execute(db) - .await - .map_err(|e| Error::InternalErr(format!("updating usage: {e:#}"))); + "INSERT INTO usage (id, is_workspace, month_, usage) + VALUES ($1, TRUE, EXTRACT(YEAR FROM current_date) * 12 + EXTRACT(MONTH FROM current_date), $2) + ON CONFLICT (id, is_workspace, month_) DO UPDATE SET usage = usage.usage + $2", + w_id, + additional_usage as i32 + ) + .execute(db) + .await + .map_err(|e| Error::InternalErr(format!("updating usage: {e:#}"))); if !premium_workspace { let _ = sqlx::query!( @@ -915,10 +871,11 @@ pub async fn 
add_completed_job( VALUES ($1, FALSE, EXTRACT(YEAR FROM current_date) * 12 + EXTRACT(MONTH FROM current_date), $2) ON CONFLICT (id, is_workspace, month_) DO UPDATE SET usage = usage.usage + $2", queued_job.email, - additional_usage as i32) - .execute(db) - .await - .map_err(|e| Error::InternalErr(format!("updating usage: {e:#}"))); + additional_usage as i32 + ) + .execute(db) + .await + .map_err(|e| Error::InternalErr(format!("updating usage: {e:#}"))); } } @@ -928,15 +885,14 @@ pub async fn add_completed_job( if let Ok(flow) = cache::job::fetch_flow(db, job.job_kind, job.script_hash).await { return flow.value().failure_module.is_some(); } - sqlx::query_scalar!("SELECT raw_flow->'failure_module' != 'null'::jsonb FROM job WHERE id = $1", job.id) - .fetch_one(db) - .or_else(|_| - sqlx::query_scalar!("SELECT raw_flow->'failure_module' != 'null'::jsonb FROM completed_job WHERE id = $1", job.id) - .fetch_one(db) - ) - .await - .unwrap_or(Some(false)) - .unwrap_or(false) + sqlx::query_scalar!( + "SELECT raw_flow->'failure_module' != 'null'::jsonb FROM v2_job WHERE id = $1", + job.id + ) + .fetch_one(db) + .await + .unwrap_or(Some(false)) + .unwrap_or(false) } if queued_job.email == ERROR_HANDLER_USER_EMAIL { @@ -1157,7 +1113,8 @@ pub async fn report_error_to_workspace_handler_or_critical_side_channel( let w_id = &queued_job.workspace_id; let (error_handler, error_handler_extra_args) = sqlx::query_as::<_, (Option, Option>>)>( "SELECT error_handler, error_handler_extra_args FROM workspace_settings WHERE workspace_id = $1", - ).bind(&w_id) + ) + .bind(&w_id) .fetch_optional(db) .await .ok() @@ -1209,7 +1166,8 @@ pub async fn send_error_to_workspace_handler<'a, 'c, T: Serialize + Send + Sync> let w_id = &queued_job.workspace_id; let (error_handler, error_handler_extra_args, error_handler_muted_on_cancel) = sqlx::query_as::<_, (Option, Option>>, bool)>( "SELECT error_handler, error_handler_extra_args, error_handler_muted_on_cancel FROM workspace_settings WHERE workspace_id = 
$1", - ).bind(&w_id) + ) + .bind(&w_id) .fetch_optional(db) .await .context("fetching error handler info from workspace_settings")? @@ -1324,7 +1282,7 @@ pub async fn handle_maybe_scheduled_job<'c>( Error::QuotaExceeded(_) => {} _ => { report_error_to_workspace_handler_or_critical_side_channel(job, db, - format!("Could not schedule next job for {} with err {}. Schedule disabled", schedule.path, err.to_string()) + format!("Could not schedule next job for {} with err {}. Schedule disabled", schedule.path, err.to_string()), ).await; } } @@ -1334,8 +1292,8 @@ pub async fn handle_maybe_scheduled_job<'c>( Error::QuotaExceeded(_) => Err(err), _ => { report_error_to_workspace_handler_or_critical_side_channel(job, db, - format!("Could not schedule next job for {} and could not disable schedule with err {}.", schedule.path, disable_err) - ).await; + format!("Could not schedule next job for {} and could not disable schedule with err {}.", schedule.path, disable_err), + ).await; Err(to_anyhow(disable_err).into()) } }, @@ -1386,7 +1344,7 @@ async fn apply_schedule_handlers<'a, 'c, T: Serialize + Send + Sync>( success AS \"success!\", result AS \"result: Json>\", started_at AS \"started_at!\" - FROM completed_job + FROM v2_as_completed_job WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4 ORDER BY created_at DESC LIMIT $5", @@ -1457,7 +1415,7 @@ async fn apply_schedule_handlers<'a, 'c, T: Serialize + Send + Sync>( success AS \"success!\", result AS \"result: Json>\", started_at AS \"started_at!\"\ - FROM completed_job WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4 + FROM v2_as_completed_job WHERE workspace_id = $1 AND schedule_path = $2 AND script_path = $3 AND id != $4 ORDER BY created_at DESC LIMIT $5", &schedule.workspace_id, @@ -1891,7 +1849,7 @@ pub async fn pull( ); sqlx::query_scalar!( - "SELECT null FROM queue WHERE id = $1 FOR UPDATE", + "SELECT null FROM v2_job_queue WHERE id = $1 FOR UPDATE", 
pulled_job.id ) .fetch_one(&mut *tx) @@ -1932,7 +1890,7 @@ pub async fn pull( let min_started_at = sqlx::query!( "SELECT COALESCE((SELECT MIN(started_at) as min_started_at - FROM queue + FROM v2_as_queue WHERE script_path = $1 AND job_kind != 'dependencies' AND running = true AND workspace_id = $2 AND canceled = false AND concurrent_limit > 0), $3) as min_started_at, now() AS now", job_script_path, &pulled_job.workspace_id, @@ -1979,7 +1937,7 @@ pub async fn pull( let job_uuid: Uuid = pulled_job.id; let avg_script_duration: Option = sqlx::query_scalar!( "SELECT CAST(ROUND(AVG(duration_ms), 0) AS BIGINT) AS avg_duration_s FROM - (SELECT duration_ms FROM concurrency_key LEFT JOIN completed_job ON completed_job.id = concurrency_key.job_id WHERE key = $1 AND ended_at IS NOT NULL + (SELECT duration_ms FROM concurrency_key LEFT JOIN v2_as_completed_job ON v2_as_completed_job.id = concurrency_key.job_id WHERE key = $1 AND ended_at IS NOT NULL ORDER BY ended_at DESC LIMIT 10) AS t", job_concurrency_key @@ -2011,7 +1969,7 @@ pub async fn pull( loop { let nestimated = estimated_next_schedule_timestamp + inc; let jobs_in_window = sqlx::query_scalar!( - "SELECT COUNT(*) FROM queue LEFT JOIN concurrency_key ON concurrency_key.job_id = queue.id + "SELECT COUNT(*) FROM v2_as_queue LEFT JOIN concurrency_key ON concurrency_key.job_id = v2_as_queue.id WHERE key = $1 AND running = false AND canceled = false AND scheduled_for >= $2 AND scheduled_for < $3", job_concurrency_key, estimated_next_schedule_timestamp, @@ -2035,18 +1993,18 @@ pub async fn pull( // if using posgtres, then we're able to re-queue the entire batch of scheduled job for this script_path, so we do it sqlx::query!( - "UPDATE queue - SET running = false - , started_at = null - , scheduled_for = $1 - , last_ping = null - WHERE id = $2", - estimated_next_schedule_timestamp, - job_uuid, - ) - .fetch_all(&mut *tx) - .await - .map_err(|e| Error::InternalErr(format!("Could not update and re-queue job {job_uuid}. 
The job will be marked as running but it is not running: {e:#}")))?; + "WITH ping AS (UPDATE v2_job_runtime SET ping = NULL WHERE id = $2 RETURNING id) + UPDATE v2_job_queue SET + running = false, + started_at = null, + scheduled_for = $1 + WHERE id = (SELECT id FROM ping)", + estimated_next_schedule_timestamp, + job_uuid, + ) + .fetch_all(&mut *tx) + .await + .map_err(|e| Error::InternalErr(format!("Could not update and re-queue job {job_uuid}. The job will be marked as running but it is not running: {e:#}")))?; tx.commit().await? } } @@ -2228,8 +2186,8 @@ pub async fn get_result_by_id( "SELECT id As \"id!\", flow_status->'restarted_from'->'flow_job_id' AS \"restarted_from: Json\" - FROM queue - WHERE COALESCE((SELECT root_job FROM queue WHERE id = $1), $1) = id AND workspace_id = $2", + FROM v2_as_queue + WHERE COALESCE((SELECT flow_root_job FROM v2_job WHERE id = $1), $1) = id AND workspace_id = $2", flow_id, &w_id ) @@ -2308,7 +2266,7 @@ pub async fn get_result_and_success_by_id_from_flow( JobResult::SingleJob(job_id) => { sqlx::query_scalar!( "SELECT success AS \"success!\" - FROM completed_job WHERE id = $1 AND workspace_id = $2", + FROM v2_as_completed_job WHERE id = $1 AND workspace_id = $2", job_id, w_id ) @@ -2325,7 +2283,11 @@ pub async fn get_result_and_success_by_id_from_flow( SELECT module->>'type' = 'Success' FROM modules WHERE module->>'id' = $3"#, - if completed { "completed_job" } else { "queue" } + if completed { + "v2_as_completed_job" + } else { + "v2_as_queue" + } ); sqlx::query_scalar(&query) .bind(flow_id) @@ -2366,8 +2328,8 @@ pub async fn get_result_by_id_from_running_flow_inner( ) -> error::Result { let flow_job_result = sqlx::query!( "SELECT leaf_jobs->$1::text AS \"leaf_jobs: Json>\", parent_job - FROM queue - WHERE COALESCE((SELECT root_job FROM queue WHERE id = $2), $2) = id AND workspace_id = $3", + FROM v2_as_queue + WHERE COALESCE((SELECT flow_root_job FROM v2_job WHERE id = $2), $2) = id AND workspace_id = $3", node_id, flow_id, 
w_id, @@ -2388,11 +2350,12 @@ pub async fn get_result_by_id_from_running_flow_inner( if job_result.is_none() && flow_job_result.parent_job.is_some() { let parent_job = flow_job_result.parent_job.unwrap(); - let root_job = sqlx::query_scalar!("SELECT root_job FROM queue WHERE id = $1", parent_job) - .fetch_optional(db) - .await? - .flatten() - .unwrap_or(parent_job); + let root_job = + sqlx::query_scalar!("SELECT flow_root_job FROM v2_job WHERE id = $1", parent_job) + .fetch_optional(db) + .await? + .flatten() + .unwrap_or(parent_job); return get_result_by_id_from_running_flow_inner(db, w_id, &root_job, node_id).await; } @@ -2429,7 +2392,7 @@ async fn get_completed_flow_node_result_rec( } else { let subflows = sqlx::query!( "SELECT id AS \"id!\", flow_status AS \"flow_status!: Json\" - FROM completed_job + FROM v2_as_completed_job WHERE parent_job = $1 AND workspace_id = $2 AND flow_status IS NOT NULL", id, w_id @@ -2459,8 +2422,8 @@ async fn get_result_by_id_from_original_flow_inner( node_id: &str, ) -> error::Result { let flow_job = sqlx::query!( - "SELECT id AS \"id!\", flow_status AS \"flow_status!: Json\" - FROM completed_job WHERE id = $1 AND workspace_id = $2", + "SELECT id, flow_status AS \"flow_status!: Json\" + FROM v2_job_completed WHERE id = $1 AND workspace_id = $2", completed_flow_id, w_id ) @@ -2514,7 +2477,7 @@ async fn extract_result_from_job_result( }; Ok(sqlx::query_scalar!( "SELECT result #> $3 AS \"result: Json>\" - FROM completed_job WHERE id = $1 AND workspace_id = $2", + FROM v2_job_completed WHERE id = $1 AND workspace_id = $2", job_id, w_id, parts.collect::>() as Vec<&str> @@ -2527,8 +2490,8 @@ async fn extract_result_from_job_result( } None => { let rows = sqlx::query!( - "SELECT id AS \"id!\", result AS \"result: Json>\" - FROM completed_job WHERE id = ANY($1) AND workspace_id = $2", + "SELECT id, result AS \"result: Json>\" + FROM v2_job_completed WHERE id = ANY($1) AND workspace_id = $2", job_ids.as_slice(), w_id ) @@ -2550,7 +2513,7 @@ 
async fn extract_result_from_job_result( }, JobResult::SingleJob(x) => Ok(sqlx::query!( "SELECT result #> $3 AS \"result: Json>\" - FROM completed_job WHERE id = $1 AND workspace_id = $2", + FROM v2_job_completed WHERE id = $1 AND workspace_id = $2", x, w_id, json_path @@ -2577,7 +2540,7 @@ pub async fn delete_job<'c>( } let job_removed = sqlx::query_scalar!( - "DELETE FROM queue WHERE workspace_id = $1 AND id = $2 RETURNING 1", + "DELETE FROM v2_job_queue WHERE workspace_id = $1 AND id = $2 RETURNING 1", w_id, job_id ) @@ -2601,7 +2564,7 @@ pub async fn delete_job<'c>( pub async fn job_is_complete(db: &DB, id: Uuid, w_id: &str) -> error::Result { Ok(sqlx::query_scalar!( - "SELECT EXISTS(SELECT 1 FROM completed_job WHERE id = $1 AND workspace_id = $2)", + "SELECT EXISTS(SELECT 1 FROM v2_job_completed WHERE id = $1 AND workspace_id = $2)", id, w_id ) @@ -2617,7 +2580,7 @@ async fn get_queued_job_tx<'c>( ) -> error::Result> { sqlx::query_as::<_, QueuedJob>( "SELECT * - FROM queue WHERE id = $1 AND workspace_id = $2", + FROM v2_as_queue WHERE id = $1 AND workspace_id = $2", ) .bind(id) .bind(w_id) @@ -2629,7 +2592,7 @@ async fn get_queued_job_tx<'c>( pub async fn get_queued_job(id: &Uuid, w_id: &str, db: &DB) -> error::Result> { sqlx::query_as::<_, QueuedJob>( "SELECT * - FROM queue WHERE id = $1 AND workspace_id = $2", + FROM v2_as_queue WHERE id = $1 AND workspace_id = $2", ) .bind(id) .bind(w_id) @@ -2757,7 +2720,7 @@ pub async fn push<'c, 'd>( parent_job: Option, root_job: Option, job_id: Option, - is_flow_step: bool, + _is_flow_step: bool, mut same_worker: bool, pre_run_error: Option<&windmill_common::error::Error>, visible_to_owner: bool, @@ -2856,11 +2819,13 @@ pub async fn push<'c, 'd>( ))); } - let in_queue = - sqlx::query_scalar!("SELECT COUNT(id) FROM queue WHERE email = $1", email) - .fetch_one(_db) - .await? 
- .unwrap_or(0); + let in_queue = sqlx::query_scalar!( + "SELECT COUNT(id) FROM v2_as_queue WHERE email = $1", + email + ) + .fetch_one(_db) + .await? + .unwrap_or(0); if in_queue > MAX_FREE_EXECS.into() { return Err(error::Error::QuotaExceeded(format!( @@ -2869,7 +2834,7 @@ pub async fn push<'c, 'd>( } let concurrent_runs = sqlx::query_scalar!( - "SELECT COUNT(id) FROM queue WHERE running = true AND email = $1", + "SELECT COUNT(id) FROM v2_as_queue WHERE running = true AND email = $1", email ) .fetch_one(_db) @@ -2888,16 +2853,16 @@ pub async fn push<'c, 'd>( workspace_usage } else { sqlx::query_scalar!( - "SELECT usage.usage + 1 FROM usage - WHERE is_workspace IS TRUE AND - month_ = EXTRACT(YEAR FROM current_date) * 12 + EXTRACT(MONTH FROM current_date) - AND id = $1", - workspace_id - ) - .fetch_optional(_db) - .await? - .flatten() - .unwrap_or(1) + "SELECT usage.usage + 1 FROM usage + WHERE is_workspace IS TRUE AND + month_ = EXTRACT(YEAR FROM current_date) * 12 + EXTRACT(MONTH FROM current_date) + AND id = $1", + workspace_id + ) + .fetch_optional(_db) + .await? 
+ .flatten() + .unwrap_or(1) }; if workspace_usage > MAX_FREE_EXECS @@ -2911,7 +2876,7 @@ pub async fn push<'c, 'd>( } let in_queue_workspace = sqlx::query_scalar!( - "SELECT COUNT(id) FROM queue WHERE workspace_id = $1", + "SELECT COUNT(id) FROM v2_job_queue WHERE workspace_id = $1", workspace_id ) .fetch_one(_db) @@ -2925,7 +2890,7 @@ pub async fn push<'c, 'd>( } let concurrent_runs_workspace = sqlx::query_scalar!( - "SELECT COUNT(id) FROM queue WHERE running = true AND workspace_id = $1", + "SELECT COUNT(id) FROM v2_job_queue WHERE running = true AND workspace_id = $1", workspace_id ) .fetch_one(_db) @@ -3663,12 +3628,9 @@ pub async fn push<'c, 'd>( let mut tx = tx.into_tx().await?; let job_id: Uuid = if let Some(job_id) = job_id { - let conflicting_id = sqlx::query_scalar!( - "SELECT 1 FROM queue WHERE id = $1 UNION ALL select 1 FROM completed_job WHERE id = $1", - job_id - ) - .fetch_optional(&mut *tx) - .await?; + let conflicting_id = sqlx::query_scalar!("SELECT 1 FROM v2_job WHERE id = $1", job_id) + .fetch_optional(&mut *tx) + .await?; if conflicting_id.is_some() { return Err(Error::BadRequest(format!( @@ -3712,71 +3674,80 @@ pub async fn push<'c, 'd>( let raw_flow = raw_flow.map(Json); sqlx::query!( - "INSERT INTO job (id, workspace_id, raw_code, raw_lock, raw_flow, tag) - VALUES ($1, $2, $3, $4, $5, $6)", + "INSERT INTO v2_job (id, workspace_id, raw_code, raw_lock, raw_flow, tag, parent_job, + created_by, permissioned_as, runnable_id, runnable_path, args, kind, trigger, + script_lang, same_worker, pre_run_error, permissioned_as_email, visible_to_owner, + flow_root_job, concurrent_limit, concurrency_time_window_s, timeout, flow_step_id, + cache_ttl, priority, trigger_kind) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, + $19, $20, $21, $22, $23, $24, $25, $26, + CASE WHEN $14::VARCHAR IS NOT NULL THEN 'schedule'::job_trigger_kind END)", job_id, workspace_id, raw_code, raw_lock, - raw_flow.as_ref() as 
Option<&Json>, + raw_flow as Option>, tag, - ) - .execute(&mut *tx) - .warn_after_seconds(1) - .await?; - - let (raw_code, raw_lock, raw_flow) = if !*MIN_VERSION_IS_AT_LEAST_1_427.read().await { - (raw_code, raw_lock, raw_flow) - } else { - (None, None, None) - }; - - tracing::debug!("Pushing job {job_id} with tag {tag}, schedule_path {schedule_path:?}, script_path: {script_path:?}, email {email}, workspace_id {workspace_id}"); - let uuid = sqlx::query_scalar!( - "INSERT INTO queue - (workspace_id, id, running, parent_job, created_by, permissioned_as, scheduled_for, - script_hash, script_path, raw_code, raw_lock, args, job_kind, schedule_path, raw_flow, \ - flow_status, is_flow_step, language, started_at, same_worker, pre_run_error, email, \ - visible_to_owner, root_job, tag, concurrent_limit, concurrency_time_window_s, timeout, \ - flow_step_id, cache_ttl, priority, last_ping) - VALUES ($1, $2, $3, $4, $5, $6, COALESCE($7, now()), $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, CASE WHEN $3 THEN now() END, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, NULL) \ - RETURNING id AS \"id!\"", - workspace_id, - job_id, - is_running, parent_job, user, permissioned_as, - scheduled_for_o, script_hash, script_path.clone(), - raw_code, - raw_lock, Json(args) as Json, job_kind.clone() as JobKind, schedule_path, - raw_flow.as_ref() as Option<&Json>, - flow_status.map(Json) as Option>, - is_flow_step, language as Option, same_worker, pre_run_error.map(|e| e.to_string()), email, visible_to_owner, root_job, - tag, concurrent_limit, - if concurrent_limit.is_some() { concurrency_time_window_s } else { None }, + if concurrent_limit.is_some() { + concurrency_time_window_s + } else { + None + }, custom_timeout, flow_step_id, cache_ttl, final_priority, ) + .execute(&mut *tx) + .warn_after_seconds(1) + .await?; + + tracing::debug!("Pushing job {job_id} with tag {tag}, schedule_path {schedule_path:?}, script_path: {script_path:?}, email {email}, workspace_id 
{workspace_id}"); + let uuid = sqlx::query_scalar!( + "INSERT INTO v2_job_queue + (workspace_id, id, running, scheduled_for, started_at, tag, priority) + VALUES ($1, $2, $3, COALESCE($4, now()), CASE WHEN $3 THEN now() END, $5, $6) \ + RETURNING id AS \"id!\"", + workspace_id, + job_id, + is_running, + scheduled_for_o, + tag, + final_priority, + ) .fetch_one(&mut *tx) .warn_after_seconds(1) .await .map_err(|e| Error::InternalErr(format!("Could not insert into queue {job_id} with tag {tag}, schedule_path {schedule_path:?}, script_path: {script_path:?}, email {email}, workspace_id {workspace_id}: {e:#}")))?; + sqlx::query!("INSERT INTO v2_job_runtime (id) VALUES ($1)", job_id) + .execute(&mut *tx) + .await?; + if let Some(flow_status) = flow_status { + sqlx::query!( + "INSERT INTO v2_job_flow_runtime (id, flow_status) VALUES ($1, $2)", + job_id, + Json(flow_status) as Json, + ) + .execute(&mut *tx) + .await?; + } + tracing::debug!("Pushed {job_id}"); // TODO: technically the job isn't queued yet, as the transaction can be rolled back. Should be solved when moving these metrics to the queue abstraction. 
#[cfg(feature = "prometheus")] @@ -3928,7 +3899,7 @@ async fn restarted_flows_resolution( job_kind AS \"job_kind!: JobKind\", flow_status AS \"flow_status: Json>\", raw_flow AS \"raw_flow: Json>\" - FROM completed_job WHERE id = $1 and workspace_id = $2", + FROM v2_as_completed_job WHERE id = $1 and workspace_id = $2", completed_flow_id, workspace_id, ) @@ -4040,7 +4011,7 @@ async fn restarted_flows_resolution( truncated_modules.push(FlowStatusModule::InProgress { id: module.id(), job: new_flow_jobs[new_flow_jobs.len() - 1], // set to last finished job from completed flow - iterator: Some(Iterator { + iterator: Some(FlowIterator { index: branch_or_iteration_n - 1, // same deal as above, this refers to the last finished job itered: vec![], // Setting itered to empty array here, such that input transforms will be re-computed by worker_flows }), diff --git a/backend/windmill-queue/src/schedule.rs b/backend/windmill-queue/src/schedule.rs index ea9f476ab6538..436d83bbea7b8 100644 --- a/backend/windmill-queue/src/schedule.rs +++ b/backend/windmill-queue/src/schedule.rs @@ -71,7 +71,7 @@ pub async fn push_scheduled_job<'c>( let next = next.with_timezone(&chrono::Utc); let already_exists: bool = query_scalar!( - "SELECT EXISTS (SELECT 1 FROM queue WHERE workspace_id = $1 AND schedule_path = $2 AND scheduled_for = $3)", + "SELECT EXISTS (SELECT 1 FROM v2_as_queue WHERE workspace_id = $1 AND schedule_path = $2 AND scheduled_for = $3)", &schedule.workspace_id, &schedule.path, next diff --git a/backend/windmill-worker/src/common.rs b/backend/windmill-worker/src/common.rs index 61ec9cee6a33c..383e194564680 100644 --- a/backend/windmill-worker/src/common.rs +++ b/backend/windmill-worker/src/common.rs @@ -265,7 +265,7 @@ pub async fn transform_json_value( } Value::String(y) if y.starts_with("$") => { let flow_path = if let Some(uuid) = job.parent_job { - sqlx::query_scalar!("SELECT script_path FROM queue WHERE id = $1", uuid) + sqlx::query_scalar!("SELECT runnable_path FROM 
v2_job WHERE id = $1", uuid) .fetch_optional(db) .await? .flatten() @@ -399,7 +399,7 @@ pub async fn get_reserved_variables( db: &sqlx::Pool, ) -> Result, Error> { let flow_path = if let Some(uuid) = job.parent_job { - sqlx::query_scalar!("SELECT script_path FROM queue WHERE id = $1", uuid) + sqlx::query_scalar!("SELECT runnable_path FROM v2_job WHERE id = $1", uuid) .fetch_optional(db) .await? .flatten() diff --git a/backend/windmill-worker/src/handle_child.rs b/backend/windmill-worker/src/handle_child.rs index 8088f078cb874..9ae6f1dbb2900 100644 --- a/backend/windmill-worker/src/handle_child.rs +++ b/backend/windmill-worker/src/handle_child.rs @@ -207,13 +207,10 @@ pub async fn handle_child( let set_reason = async { if matches!(kill_reason, KillReason::Timeout { .. }) { if let Err(err) = sqlx::query!( - r#" - UPDATE queue - SET canceled = true - , canceled_by = 'timeout' - , canceled_reason = $1 - WHERE id = $2 - "#, + "UPDATE v2_job_queue + SET canceled_by = 'timeout' + , canceled_reason = $1 + WHERE id = $2", format!("duration > {}", timeout_duration.as_secs()), job_id ) @@ -644,28 +641,31 @@ where } } if job_id != Uuid::nil() { - let (canceled, canceled_by, canceled_reason, already_completed) = sqlx::query!( - "UPDATE queue SET mem_peak = $1, last_ping = now() - WHERE id = $2 - RETURNING canceled AS \"canceled!\", canceled_by, canceled_reason", + let (canceled_by, canceled_reason, already_completed) = sqlx::query!( + "UPDATE v2_job_runtime r SET + memory_peak = $1, + ping = now() + FROM v2_job_queue q + WHERE r.id = $2 AND q.id = r.id + RETURNING canceled_by, canceled_reason", *mem_peak, job_id ) - .map(|x| (x.canceled, x.canceled_by, x.canceled_reason, false)) + .map(|x| (x.canceled_by, x.canceled_reason, false)) .fetch_optional(&db) .await .unwrap_or_else(|e| { tracing::error!(%e, "error updating job {job_id}: {e:#}"); - Some((false, None, None, false)) + Some((None, None, false)) }) .unwrap_or_else(|| { // if the job is not in queue, it can only be in the 
completed_job so it is already complete - (false, None, None, true) + (None, None, true) }); if already_completed { return UpdateJobPollingExit::AlreadyCompleted } - if canceled { + if canceled_by.is_some() { canceled_by_ref.replace(CanceledBy { username: canceled_by.clone(), reason: canceled_reason.clone(), diff --git a/backend/windmill-worker/src/python_executor.rs b/backend/windmill-worker/src/python_executor.rs index 001f771eb70ba..36ec65c94af37 100644 --- a/backend/windmill-worker/src/python_executor.rs +++ b/backend/windmill-worker/src/python_executor.rs @@ -1981,29 +1981,26 @@ pub async fn handle_python_reqs( // Notify server that we are still alive // Detect if job has been canceled - let canceled = - sqlx::query_scalar::<_, bool> - (r#" - - UPDATE queue - SET last_ping = now() - , mem_peak = $1 - WHERE id = $2 - RETURNING canceled - - "#) - .bind(mem_peak_actual) - .bind(job_id_2) - .fetch_optional(&db_2) - .await - .unwrap_or_else(|e| { - tracing::error!(%e, "error updating job {job_id_2}: {e:#}"); - Some(false) - }) - .unwrap_or_else(|| { - // if the job is not in queue, it can only be in the completed_job so it is already complete - false - }); + let canceled = sqlx::query_scalar!( + "UPDATE v2_job_runtime r SET + memory_peak = $1, + ping = now() + FROM v2_job_queue q + WHERE r.id = $2 AND q.id = r.id + RETURNING canceled_by IS NOT NULL AS \"canceled!\"", + mem_peak_actual, + job_id_2 + ) + .fetch_optional(&db_2) + .await + .unwrap_or_else(|e| { + tracing::error!(%e, "error updating job {job_id_2}: {e:#}"); + Some(false) + }) + .unwrap_or_else(|| { + // if the job is not in queue, it can only be in the completed_job so it is already complete + false + }); if canceled { @@ -2224,12 +2221,12 @@ pub async fn handle_python_reqs( uv_install_proccess.kill().await?; pids.lock().await.get_mut(i).and_then(|e| e.take()); return Err(anyhow::anyhow!("uv pip install was canceled")); - }, + }, (_, exitstatus) = async { // See tokio::process::Child::wait_with_output() 
for more context // Sometimes uv_install_proccess.wait() is not exiting if stderr is not awaited before it :/ (stderr_future.await, uv_install_proccess.wait().await) - } => match exitstatus { + } => match exitstatus { Ok(status) => if !status.success() { tracing::warn!( workspace_id = %w_id, diff --git a/backend/windmill-worker/src/worker.rs b/backend/windmill-worker/src/worker.rs index 0d657fbf16a4e..03772606b95d6 100644 --- a/backend/windmill-worker/src/worker.rs +++ b/backend/windmill-worker/src/worker.rs @@ -1280,7 +1280,7 @@ pub async fn run_worker( tokio::task::spawn( (async move { tracing::info!(worker = %worker_name, hostname = %hostname, "vacuuming queue"); - if let Err(e) = sqlx::query!("VACUUM (skip_locked) queue") + if let Err(e) = sqlx::query!("VACUUM (skip_locked) v2_job_queue, v2_job_runtime, v2_job_flow_runtime") .execute(&db2) .await { @@ -1327,7 +1327,9 @@ pub async fn run_worker( same_worker_job.job_id ); let r = sqlx::query_as::<_, PulledJob>( - "UPDATE queue SET last_ping = now() WHERE id = $1 RETURNING *", + "WITH ping AS ( + UPDATE v2_job_runtime SET ping = NOW() WHERE id = $1 RETURNING id + ) SELECT * FROM v2_as_queue WHERE id = (SELECT id FROM ping)", ) .bind(same_worker_job.job_id) .fetch_optional(db) @@ -2003,14 +2005,24 @@ async fn handle_queued_job( .await?; } else if let Some(parent_job) = job.parent_job { if let Err(e) = sqlx::query_scalar!( - "UPDATE queue SET flow_status = jsonb_set(jsonb_set(COALESCE(flow_status, '{}'::jsonb), array[$1], COALESCE(flow_status->$1, '{}'::jsonb)), array[$1, 'started_at'], to_jsonb(now()::text)) WHERE id = $2 AND workspace_id = $3", + "UPDATE v2_job_flow_runtime SET + flow_status = jsonb_set( + jsonb_set( + COALESCE(flow_status, '{}'::jsonb), + array[$1], + COALESCE(flow_status->$1, '{}'::jsonb) + ), + array[$1, 'started_at'], + to_jsonb(now()::text) + ) + WHERE id = $2", &job.id.to_string(), - parent_job, - &job.workspace_id + parent_job ) .execute(db) .warn_after_seconds(5) - .await { + .await + { 
tracing::error!("Could not update parent job started_at flow_status: {}", e); } } diff --git a/backend/windmill-worker/src/worker_flow.rs b/backend/windmill-worker/src/worker_flow.rs index b81717cee1936..626ce18aa12b4 100644 --- a/backend/windmill-worker/src/worker_flow.rs +++ b/backend/windmill-worker/src/worker_flow.rs @@ -35,12 +35,12 @@ use windmill_common::bench::BenchmarkIter; use windmill_common::cache::{self, RawData}; use windmill_common::db::Authed; use windmill_common::flow_status::{ - ApprovalConditions, FlowStatusModuleWParent, Iterator, JobResult, + ApprovalConditions, FlowStatusModuleWParent, Iterator as FlowIterator, JobResult, }; use windmill_common::flows::{add_virtual_items_if_necessary, Branch, FlowNodeId}; use windmill_common::jobs::{ - script_hash_to_tag_and_limits, script_path_to_payload, BranchResults, JobKind, JobPayload, - OnBehalfOf, QueuedJob, RawCode, ENTRYPOINT_OVERRIDE, + script_hash_to_tag_and_limits, script_path_to_payload, JobKind, JobPayload, OnBehalfOf, + QueuedJob, RawCode, ENTRYPOINT_OVERRIDE, }; use windmill_common::scripts::ScriptHash; use windmill_common::users::username_to_permissioned_as; @@ -177,11 +177,6 @@ pub struct RecUpdateFlowStatusAfterJobCompletion { skip_error_handler: bool, } -#[derive(FromRow)] -pub struct RowArgs { - pub args: Option>>>, -} - #[derive(Deserialize)] struct RecoveryObject { recover: Option, @@ -224,7 +219,7 @@ pub async fn update_flow_status_after_job_completion_internal( script_hash AS \"script_hash: ScriptHash\", flow_status AS \"flow_status!: Json>\", raw_flow AS \"raw_flow: Json>\" - FROM queue WHERE id = $1 AND workspace_id = $2 LIMIT 1", + FROM v2_as_queue WHERE id = $1 AND workspace_id = $2 LIMIT 1", flow, w_id ) @@ -353,14 +348,13 @@ pub async fn update_flow_status_after_job_completion_internal( )), _ => None, }; - let args = sqlx::query_as::<_, RowArgs>( + let args = sqlx::query_scalar!( "SELECT - args - FROM queue - WHERE id = $2", + args AS \"args: Json>>\" + FROM v2_job + WHERE id = 
$1", + flow ) - .bind(old_status.step) - .bind(flow) .fetch_one(db) .await .map_err(|e| { @@ -368,7 +362,7 @@ pub async fn update_flow_status_after_job_completion_internal( })?; compute_bool_from_expr( &expr, - Marc::new(args.args.unwrap_or_default().0), + Marc::new(args.unwrap_or_default().0), result.clone(), all_iters, None, @@ -412,22 +406,16 @@ pub async fn update_flow_status_after_job_completion_internal( if matches!(module_step, Step::PreprocessorStep) { sqlx::query!( - "UPDATE queue SET args = (select result FROM completed_job WHERE id = $1) WHERE id = $2", + "UPDATE v2_job SET args = (SELECT result FROM v2_job_completed WHERE id = $1) + WHERE id = $2", job_id_for_status, flow - ).execute(db).await.map_err(|e| { - Error::InternalErr(format!("error while updating args in preprocessing step: {e:#}")) - })?; - - sqlx::query!( - r#"UPDATE completed_job SET args = '{"reason":"PREPROCESSOR_ARGS_ARE_DISCARDED"}'::jsonb WHERE id = $1"#, - job_id_for_status ) .execute(db) .await .map_err(|e| { Error::InternalErr(format!( - "error while deleting args of preprocessing step: {e:#}" + "error while updating args in preprocessing step: {e:#}" )) })?; } @@ -446,7 +434,7 @@ pub async fn update_flow_status_after_job_completion_internal( .. } if *parallel => { let (nindex, len) = match (iterator, branchall) { - (Some(Iterator { itered, .. }), _) => { + (Some(FlowIterator { itered, .. 
}), _) => { let position = if flow_jobs_success.is_some() { find_flow_job_index(jobs, job_id_for_status) } else { @@ -455,24 +443,27 @@ pub async fn update_flow_status_after_job_completion_internal( let nindex = if let Some(position) = position { sqlx::query_scalar!( - "UPDATE queue - SET flow_status = JSONB_SET( - JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4), - ARRAY['modules', $1::TEXT, 'iterator', 'index'], - ((flow_status->'modules'->$1::int->'iterator'->>'index')::int + 1)::text::jsonb - ), - last_ping = NULL - WHERE id = $2 - RETURNING (flow_status->'modules'->$1::int->'iterator'->>'index')::int", - old_status.step, - flow, - position as i32, - json!(success) - )} else { + "UPDATE v2_job_flow_runtime SET + flow_status = JSONB_SET( + JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4), + ARRAY['modules', $1::TEXT, 'iterator', 'index'], + ((flow_status->'modules'->$1::int->'iterator'->>'index')::int + 1)::text::jsonb + ) + WHERE id = $2 + RETURNING (flow_status->'modules'->$1::int->'iterator'->>'index')::int", + old_status.step, + flow, + position as i32, + json!(success) + ) + } else { sqlx::query_scalar!( - "UPDATE queue - SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'iterator', 'index'], ((flow_status->'modules'->$1::int->'iterator'->>'index')::int + 1)::text::jsonb), - last_ping = NULL + "UPDATE v2_job_flow_runtime SET + flow_status = JSONB_SET( + flow_status, + ARRAY['modules', $1::TEXT, 'iterator', 'index'], + ((flow_status->'modules'->$1::int->'iterator'->>'index')::int + 1)::text::jsonb + ) WHERE id = $2 RETURNING (flow_status->'modules'->$1::int->'iterator'->>'index')::int", old_status.step, @@ -502,11 +493,12 @@ pub async fn update_flow_status_after_job_completion_internal( let nindex = if let Some(position) = position { sqlx::query_scalar!( - "UPDATE queue - SET flow_status = JSONB_SET( - JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 
'flow_jobs_success', $3::TEXT], $4), - ARRAY['modules', $1::TEXT, 'branchall', 'branch'], ((flow_status->'modules'->$1::int->'branchall'->>'branch')::int + 1)::text::jsonb), - last_ping = NULL + "UPDATE v2_job_flow_runtime SET + flow_status = JSONB_SET( + JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4), + ARRAY['modules', $1::TEXT, 'branchall', 'branch'], + ((flow_status->'modules'->$1::int->'branchall'->>'branch')::int + 1)::text::jsonb + ) WHERE id = $2 RETURNING (flow_status->'modules'->$1::int->'branchall'->>'branch')::int", old_status.step, @@ -514,15 +506,20 @@ pub async fn update_flow_status_after_job_completion_internal( position as i32, json!(success) ) - } else { sqlx::query_scalar!( - "UPDATE queue - SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'branchall', 'branch'], ((flow_status->'modules'->$1::int->'branchall'->>'branch')::int + 1)::text::jsonb), - last_ping = NULL - WHERE id = $2 - RETURNING (flow_status->'modules'->$1::int->'branchall'->>'branch')::int", - old_status.step, - flow - )} + } else { + sqlx::query_scalar!( + "UPDATE v2_job_flow_runtime SET + flow_status = JSONB_SET( + flow_status, + ARRAY['modules', $1::TEXT, 'branchall', 'branch'], + ((flow_status->'modules'->$1::int->'branchall'->>'branch')::int + 1)::text::jsonb + ) + WHERE id = $2 + RETURNING (flow_status->'modules'->$1::int->'branchall'->>'branch')::int", + old_status.step, + flow + ) + } .fetch_one(&mut *tx) .await .map_err(|e| { @@ -537,6 +534,10 @@ pub async fn update_flow_status_after_job_completion_internal( "unexpected status for parallel module" )))?, }; + // reset ping after updating flow status: + let _ = sqlx::query!("UPDATE v2_job_runtime SET ping = NULL WHERE id = $1", flow) + .execute(&mut *tx) + .await?; if nindex == len { let mut flow_jobs_success = flow_jobs_success.clone(); if let Some(flow_job_success) = flow_jobs_success.as_mut() { @@ -550,7 +551,7 @@ pub async fn 
update_flow_status_after_job_completion_internal( let new_status = if skip_loop_failures || sqlx::query_scalar!( - "SELECT success AS \"success!\" FROM completed_job WHERE id = ANY($1)", + "SELECT success AS \"success!\" FROM v2_as_completed_job WHERE id = ANY($1)", jobs.as_slice() ) .fetch_all(&mut *tx) @@ -609,7 +610,11 @@ pub async fn update_flow_status_after_job_completion_internal( if parallelism.is_some() { sqlx::query!( - "UPDATE queue SET suspend = 0 WHERE parent_job = $1 AND suspend = $2 AND (flow_status->'step')::int = 0", + "UPDATE v2_job_queue q SET suspend = 0 + FROM v2_job j, v2_job_flow_runtime f + WHERE parent_job = $1 + AND f.id = j.id AND q.id = j.id + AND suspend = $2 AND (f.flow_status->'step')::int = 0", flow, nindex ) @@ -640,7 +645,7 @@ pub async fn update_flow_status_after_job_completion_internal( } } FlowStatusModule::InProgress { - iterator: Some(windmill_common::flow_status::Iterator { index, itered, .. }), + iterator: Some(FlowIterator { index, itered, .. }), flow_jobs_success, flow_jobs, while_loop, @@ -713,7 +718,7 @@ pub async fn update_flow_status_after_job_completion_internal( let is_skipped = if current_module.as_ref().is_some_and(|m| m.skip_if.is_some()) { sqlx::query_scalar!( - "SELECT job_kind = 'identity' FROM completed_job WHERE id = $1", + "SELECT kind = 'identity' FROM v2_job WHERE id = $1", job_id_for_status ) .fetch_one(db) @@ -766,7 +771,7 @@ pub async fn update_flow_status_after_job_completion_internal( let step_counter = if inc_step_counter { sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(flow_status, ARRAY['step'], $1) WHERE id = $2", json!(old_status.step + 1), @@ -790,7 +795,7 @@ pub async fn update_flow_status_after_job_completion_internal( if let Some(new_status) = new_status.as_ref() { if is_failure_step { let parent_module = sqlx::query_scalar!( - "SELECT flow_status->'failure_module'->>'parent_module' FROM queue WHERE id = $1", + "SELECT 
flow_status->'failure_module'->>'parent_module' FROM v2_job_flow_runtime WHERE id = $1", flow ) .fetch_one(&mut *tx) @@ -801,7 +806,7 @@ pub async fn update_flow_status_after_job_completion_internal( })?; sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(flow_status, ARRAY['failure_module'], $1) WHERE id = $2", json!(FlowStatusModuleWParent { @@ -819,7 +824,7 @@ pub async fn update_flow_status_after_job_completion_internal( })?; } else if matches!(module_step, Step::PreprocessorStep) { sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(flow_status, ARRAY['preprocessor_module'], $1) WHERE id = $2", json!(new_status), @@ -834,7 +839,7 @@ pub async fn update_flow_status_after_job_completion_internal( })?; } else { sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2) WHERE id = $3", old_status.step.to_string(), @@ -849,9 +854,9 @@ pub async fn update_flow_status_after_job_completion_internal( if let Some(job_result) = new_status.job_result() { sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET leaf_jobs = JSONB_SET(coalesce(leaf_jobs, '{}'::jsonb), ARRAY[$1::TEXT], $2) - WHERE COALESCE((SELECT root_job FROM queue WHERE id = $3), $3) = id", + WHERE COALESCE((SELECT flow_root_job FROM v2_job WHERE id = $3), $3) = id", new_status.id(), json!(job_result), flow @@ -880,14 +885,11 @@ pub async fn update_flow_status_after_job_completion_internal( .as_ref() .and_then(|m| m.stop_after_all_iters_if.as_ref().map(|x| x.expr.clone())) { - let args = sqlx::query_as::<_, RowArgs>( - "SELECT - args - FROM queue - WHERE id = $2", + let args = sqlx::query_scalar!( + "SELECT args AS \"args: Json>>\" + FROM v2_job WHERE id = $1", + flow ) - .bind(old_status.step) - .bind(flow) .fetch_one(db) .await .map_err(|e| { @@ -896,7 +898,7 @@ pub async fn update_flow_status_after_job_completion_internal( let should_stop = 
compute_bool_from_expr( &expr, - Marc::new(args.args.unwrap_or_default().0), + Marc::new(args.unwrap_or_default().0), nresult.clone(), None, None, @@ -926,7 +928,7 @@ pub async fn update_flow_status_after_job_completion_internal( && matches!(&new_status, Some(FlowStatusModule::Success { .. })) { sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = flow_status - 'retry' WHERE id = $1", flow @@ -937,7 +939,7 @@ pub async fn update_flow_status_after_job_completion_internal( } let flow_job = sqlx::query_as::<_, QueuedJob>( - "SELECT * FROM queue WHERE id = $1 AND workspace_id = $2", + "SELECT * FROM v2_as_queue WHERE id = $1 AND workspace_id = $2", ) .bind(flow) .bind(w_id) @@ -1033,15 +1035,22 @@ pub async fn update_flow_status_after_job_completion_internal( _cleanup_module.flow_jobs_to_clean ); sqlx::query!( - "UPDATE completed_job - SET logs = '##DELETED##', args = '{}'::jsonb, result = '{}'::jsonb - WHERE id = ANY($1)", + "UPDATE v2_job SET args = '{}'::jsonb WHERE id = ANY($1)", + &_cleanup_module.flow_jobs_to_clean, + ) + .execute(db) + .await + .map_err(|e| { + Error::InternalErr(format!("error while cleaning up completed job: {e:#}")) + })?; + sqlx::query!( + "UPDATE v2_job_completed SET result = '{}'::jsonb WHERE id = ANY($1)", &_cleanup_module.flow_jobs_to_clean, ) .execute(db) .await .map_err(|e| { - Error::InternalErr(format!("error while cleaning up completed_job: {e:#}")) + Error::InternalErr(format!("error while cleaning up completed job: {e:#}")) })?; } } @@ -1189,18 +1198,23 @@ async fn set_success_in_flow_job_success<'c>( let position = find_flow_job_index(flow_jobs, job_id_for_status); if let Some(position) = position { sqlx::query!( - "UPDATE queue SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], $4) WHERE id = $2", - old_status.step as i32, - flow, - position as i32, - json!(success) - ) - .execute(&mut **tx) - .await.map_err(|e| { - Error::InternalErr(format!( - "error 
while setting flow_jobs_success: {e:#}" - )) - })?; + "UPDATE v2_job_flow_runtime SET + flow_status = JSONB_SET( + flow_status, + ARRAY['modules', $1::TEXT, 'flow_jobs_success', $3::TEXT], + $4 + ) + WHERE id = $2", + old_status.step as i32, + flow, + position as i32, + json!(success) + ) + .execute(&mut **tx) + .await + .map_err(|e| { + Error::InternalErr(format!("error while setting flow_jobs_success: {e:#}")) + })?; } } Ok(()) @@ -1211,13 +1225,13 @@ async fn retrieve_flow_jobs_results( w_id: &str, job_uuids: &Vec, ) -> error::Result> { - let results = sqlx::query_as::<_, BranchResults>( + let results = sqlx::query!( "SELECT result, id - FROM completed_job + FROM v2_job_completed WHERE id = ANY($1) AND workspace_id = $2", + job_uuids.as_slice(), + w_id ) - .bind(job_uuids.as_slice()) - .bind(w_id) .fetch_all(db) .await? .into_iter() @@ -1245,7 +1259,7 @@ async fn compute_skip_branchall_failure<'c>( flow_module: Option<&FlowModule>, ) -> Result, Error> { let branch = if parallel { - sqlx::query_scalar!("SELECT script_path FROM completed_job WHERE id = $1", job) + sqlx::query_scalar!("SELECT runnable_path FROM v2_job WHERE id = $1", job) .fetch_one(db) .await .map_err(|e| { @@ -1347,7 +1361,7 @@ async fn compute_bool_from_expr( pub async fn update_flow_status_in_progress( db: &DB, - w_id: &str, + _w_id: &str, flow: Uuid, job_in_progress: Uuid, ) -> error::Result { @@ -1355,16 +1369,15 @@ pub async fn update_flow_status_in_progress( match step { Step::Step(step) => { sqlx::query!( - "UPDATE queue - SET flow_status = jsonb_set( - jsonb_set(flow_status, ARRAY['modules', $4::INTEGER::TEXT, 'job'], to_jsonb($1::UUID::TEXT)), - ARRAY['modules', $4::INTEGER::TEXT, 'type'], - to_jsonb('InProgress'::text) - ) - WHERE id = $2 AND workspace_id = $3", + "UPDATE v2_job_flow_runtime SET + flow_status = jsonb_set( + jsonb_set(flow_status, ARRAY['modules', $3::INTEGER::TEXT, 'job'], to_jsonb($1::UUID::TEXT)), + ARRAY['modules', $3::INTEGER::TEXT, 'type'], + 
to_jsonb('InProgress'::text) + ) + WHERE id = $2", job_in_progress, flow, - w_id, step as i32 ) .execute(db) @@ -1372,32 +1385,30 @@ pub async fn update_flow_status_in_progress( } Step::PreprocessorStep => { sqlx::query!( - "UPDATE queue - SET flow_status = jsonb_set( - jsonb_set(flow_status, ARRAY['preprocessor_module', 'job'], to_jsonb($1::UUID::TEXT)), - ARRAY['preprocessor_module', 'type'], - to_jsonb('InProgress'::text) - ) - WHERE id = $2 AND workspace_id = $3", + "UPDATE v2_job_flow_runtime SET + flow_status = jsonb_set( + jsonb_set(flow_status, ARRAY['preprocessor_module', 'job'], to_jsonb($1::UUID::TEXT)), + ARRAY['preprocessor_module', 'type'], + to_jsonb('InProgress'::text) + ) + WHERE id = $2", job_in_progress, - flow, - w_id + flow ) .execute(db) .await?; } Step::FailureStep => { sqlx::query!( - "UPDATE queue - SET flow_status = jsonb_set( - jsonb_set(flow_status, ARRAY['failure_module', 'job'], to_jsonb($1::UUID::TEXT)), - ARRAY['failure_module', 'type'], - to_jsonb('InProgress'::text) - ) - WHERE id = $2 AND workspace_id = $3", + "UPDATE v2_job_flow_runtime SET + flow_status = jsonb_set( + jsonb_set(flow_status, ARRAY['failure_module', 'job'], to_jsonb($1::UUID::TEXT)), + ARRAY['failure_module', 'type'], + to_jsonb('InProgress'::text) + ) + WHERE id = $2", job_in_progress, - flow, - w_id + flow ) .execute(db) .await?; @@ -1429,7 +1440,8 @@ impl Step { #[instrument(level = "trace", skip_all)] pub async fn get_step_of_flow_status(db: &DB, id: Uuid) -> error::Result { let r = sqlx::query!( - "SELECT (flow_status->'step')::integer as step, jsonb_array_length(flow_status->'modules') as len FROM queue WHERE id = $1", + "SELECT (flow_status->'step')::integer as step, jsonb_array_length(flow_status->'modules') as len + FROM v2_job_flow_runtime WHERE id = $1", id ) .fetch_one(db) @@ -1687,7 +1699,7 @@ async fn push_next_flow_job( .await?; if no_flow_overlap { let overlapping = sqlx::query_scalar!( - "SELECT id AS \"id!\" FROM queue WHERE schedule_path = $1 
AND workspace_id = $2 AND id != $3 AND running = true", + "SELECT id AS \"id!\" FROM v2_as_queue WHERE schedule_path = $1 AND workspace_id = $2 AND id != $3 AND running = true", flow_job.schedule_path.as_ref().unwrap(), flow_job.workspace_id.as_str(), flow_job.id @@ -1800,7 +1812,7 @@ async fn push_next_flow_job( * * This only works because jobs::resume_job does the same thing. */ sqlx::query_scalar!( - "SELECT null FROM queue WHERE id = $1 FOR UPDATE", + "SELECT null FROM v2_job_queue WHERE id = $1 FOR UPDATE", flow_job.id ) .fetch_one(&mut *tx) @@ -1872,12 +1884,12 @@ async fn push_next_flow_job( } } let approval_conditions = ApprovalConditions { - user_auth_required: user_auth_required, - user_groups_required: user_groups_required, - self_approval_disabled: self_approval_disabled, + user_auth_required, + user_groups_required, + self_approval_disabled, }; sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(flow_status, ARRAY['approval_conditions'], $1) WHERE id = $2", json!(approval_conditions), @@ -1909,7 +1921,7 @@ async fn push_next_flow_job( resume_messages.push(to_raw_value(&js)); } sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT, 'approvers'], $2) WHERE id = $3", (status.step - 1).to_string(), @@ -1930,7 +1942,7 @@ async fn push_next_flow_job( // Remove the approval conditions from the flow status sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = flow_status - 'approval_conditions' WHERE id = $1", flow_job.id @@ -1948,23 +1960,32 @@ async fn push_next_flow_job( ) && is_disapproved.is_none() { sqlx::query!( - "UPDATE queue SET - flow_status = JSONB_SET(flow_status, ARRAY['modules', flow_status->>'step'::text], $1), - suspend = $2, - suspend_until = now() + $3 - WHERE id = $4", - json!(FlowStatusModule::WaitingForEvents { id: status_module.id(), count: required_events, job: last }), + "WITH suspend AS ( + UPDATE 
v2_job_queue SET suspend = $2, suspend_until = now() + $3 + WHERE id = $4 + RETURNING id + ) UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET( + flow_status, + ARRAY['modules', flow_status->>'step'::TEXT], + $1 + ) WHERE id = (SELECT id FROM suspend)", + json!(FlowStatusModule::WaitingForEvents { + id: status_module.id(), + count: required_events, + job: last + }), (required_events - resume_messages.len() as u16) as i32, - Duration::from_secs(suspend.timeout.map(|t| t.into()).unwrap_or_else(|| 30 * 60)) as Duration, + Duration::from_secs( + suspend.timeout.map(|t| t.into()).unwrap_or_else(|| 30 * 60) + ) as Duration, flow_job.id, ) .execute(&mut *tx) .await?; sqlx::query!( - "UPDATE queue - SET last_ping = null - WHERE id = $1 AND last_ping = $2", + "UPDATE v2_job_runtime SET ping = NULL + WHERE id = $1 AND ping = $2", flow_job.id, flow_job.last_ping ) @@ -2134,7 +2155,7 @@ async fn push_next_flow_job( scheduled_for_o = Some(from_now(retry_in)); status.retry.failed_jobs.push(job.clone()); sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(JSONB_SET(flow_status, ARRAY['retry'], $1), ARRAY['modules', $3::TEXT, 'failed_retries'], $4) WHERE id = $2", json!(RetryStatus { fail_count, ..status.retry.clone() }), @@ -2171,7 +2192,7 @@ async fn push_next_flow_job( if module.retry.as_ref().is_some_and(|x| x.has_attempts()) { sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(flow_status, ARRAY['retry'], $1) WHERE id = $2", json!(RetryStatus { fail_count: 0, failed_jobs: vec![] }), @@ -2231,7 +2252,7 @@ async fn push_next_flow_job( } else if let Some(id) = get_args_from_id { let args = sqlx::query_scalar!( "SELECT args AS \"args: Json>>\" - FROM completed_job WHERE id = $1 AND workspace_id = $2", + FROM v2_job WHERE id = $1 AND workspace_id = $2", id, &flow_job.workspace_id ) @@ -2318,7 +2339,7 @@ async fn push_next_flow_job( NextFlowTransform::Continue(job_payload, next_state) => (job_payload, 
next_state), NextFlowTransform::EmptyInnerFlows => { sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2) WHERE id = $3", status.step.to_string(), @@ -2369,9 +2390,7 @@ async fn push_next_flow_job( if i % 100 == 0 && i != 0 { tracing::info!(id = %flow_job.id, root_id = %job_root, "pushed (non-commited yet) first {i} subflows of {len}"); sqlx::query!( - "UPDATE queue - SET last_ping = now() - WHERE id = $1 AND last_ping < now()", + "UPDATE v2_job_runtime SET ping = now() WHERE id = $1 AND ping < now()", flow_job.id, ) .execute(db) @@ -2445,7 +2464,7 @@ async fn push_next_flow_job( } NextStatus::AllFlowJobs { branchall: None, - iterator: Some(Iterator { itered, .. }), + iterator: Some(FlowIterator { itered, .. }), simple_input_transforms, } => { if let Ok(args) = args.as_ref() { @@ -2595,9 +2614,11 @@ async fn push_next_flow_job( if i as u16 >= p { sqlx::query!( - "UPDATE queue - SET suspend = $1, suspend_until = now() + interval '14 day', running = true - WHERE id = $2", + "UPDATE v2_job_queue SET + suspend = $1, + suspend_until = now() + interval '14 day', + running = true + WHERE id = $2", (i as u16 - p + 1) as i32, uuid, ) @@ -2614,7 +2635,7 @@ async fn push_next_flow_job( })?; sqlx::query!( - "UPDATE queue + "UPDATE v2_job_flow_runtime SET flow_status = JSONB_SET(flow_status, ARRAY['cleanup_module', 'flow_jobs_to_clean'], COALESCE(flow_status->'cleanup_module'->'flow_jobs_to_clean', '[]'::jsonb) || $1) WHERE id = $2", uuid_singleton_json, @@ -2673,7 +2694,7 @@ async fn push_next_flow_job( } FlowStatusModule::InProgress { job: uuid, - iterator: Some(windmill_common::flow_status::Iterator { index, itered }), + iterator: Some(FlowIterator { index, itered }), flow_jobs: Some(flow_jobs), flow_jobs_success, branch_chosen: None, @@ -2743,9 +2764,12 @@ async fn push_next_flow_job( match step { Step::FailureStep => { sqlx::query!( - "UPDATE queue - SET flow_status = JSONB_SET( - 
JSONB_SET(flow_status, ARRAY['failure_module'], $1), ARRAY['step'], $2) + "UPDATE v2_job_flow_runtime SET + flow_status = JSONB_SET( + JSONB_SET(flow_status, ARRAY['failure_module'], $1), + ARRAY['step'], + $2 + ) WHERE id = $3", json!(FlowStatusModuleWParent { parent_module: Some(current_id.clone()), @@ -2759,9 +2783,12 @@ async fn push_next_flow_job( } Step::PreprocessorStep => { sqlx::query!( - "UPDATE queue - SET flow_status = JSONB_SET( - JSONB_SET(flow_status, ARRAY['preprocessor_module'], $1), ARRAY['step'], $2) + "UPDATE v2_job_flow_runtime SET + flow_status = JSONB_SET( + JSONB_SET(flow_status, ARRAY['preprocessor_module'], $1), + ARRAY['step'], + $2 + ) WHERE id = $3", json!(new_status), json!(-1), @@ -2772,9 +2799,12 @@ async fn push_next_flow_job( } Step::Step(i) => { sqlx::query!( - "UPDATE queue - SET flow_status = JSONB_SET( - JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2), ARRAY['step'], $3) + "UPDATE v2_job_flow_runtime SET + flow_status = JSONB_SET( + JSONB_SET(flow_status, ARRAY['modules', $1::TEXT], $2), + ARRAY['step'], + $3 + ) WHERE id = $4", i as i32, json!(new_status), @@ -2789,9 +2819,7 @@ async fn push_next_flow_job( potentially_crash_for_testing(); sqlx::query!( - "UPDATE queue - SET last_ping = null - WHERE id = $1", + "UPDATE v2_job_runtime SET ping = null WHERE id = $1", flow_job.id ) .execute(&mut *tx) @@ -2914,7 +2942,7 @@ enum NextStatus { }, AllFlowJobs { branchall: Option, - iterator: Option, + iterator: Option, simple_input_transforms: Option>, }, } @@ -3257,10 +3285,7 @@ async fn compute_next_flow_transform( ContinuePayload::ParallelJobs(payloads), NextStatus::AllFlowJobs { branchall: None, - iterator: Some(windmill_common::flow_status::Iterator { - index: 0, - itered, - }), + iterator: Some(FlowIterator { index: 0, itered }), // we removed the is_simple_case for simple_input_transforms // if is_simple { // match value { @@ -3607,7 +3632,7 @@ async fn next_forloop_status( } FlowStatusModule::InProgress { - iterator: 
Some(windmill_common::flow_status::Iterator { itered, index }), + iterator: Some(FlowIterator { itered, index }), flow_jobs: Some(flow_jobs), flow_jobs_success, .. @@ -3951,11 +3976,12 @@ async fn get_previous_job_result( Ok(Some(retrieve_flow_jobs_results(db, w_id, flow_jobs).await?)) } Some(FlowStatusModule::Success { job, .. }) => Ok(Some( - sqlx::query_scalar::<_, Json>>( - "SELECT result FROM completed_job WHERE id = $1 AND workspace_id = $2", + sqlx::query_scalar!( + "SELECT result AS \"result!: Json>\" + FROM v2_job_completed WHERE id = $1 AND workspace_id = $2", + job, + w_id ) - .bind(job) - .bind(w_id) .fetch_one(db) .await? .0, diff --git a/backend/windmill-worker/src/worker_lockfiles.rs b/backend/windmill-worker/src/worker_lockfiles.rs index 9187b5e24b7d2..b5f37aa6c697d 100644 --- a/backend/windmill-worker/src/worker_lockfiles.rs +++ b/backend/windmill-worker/src/worker_lockfiles.rs @@ -634,7 +634,7 @@ pub async fn handle_flow_dependency_job( // Re-check cancellation to ensure we don't accidentally override a flow. if sqlx::query_scalar!( - "SELECT canceled AS \"canceled!\" FROM queue WHERE id = $1", + "SELECT canceled_by IS NOT NULL AS \"canceled!\" FROM v2_job_queue WHERE id = $1", job.id ) .fetch_optional(db) @@ -1528,7 +1528,7 @@ pub async fn handle_app_dependency_job( // Re-check cancelation to ensure we don't accidentially override an app. if sqlx::query_scalar!( - "SELECT canceled AS \"canceled!\" FROM queue WHERE id = $1", + "SELECT canceled_by IS NOT NULL AS \"canceled!\" FROM v2_job_queue WHERE id = $1", job.id ) .fetch_optional(db)