diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml
index f3ec1d48262d..a86f236f49d7 100644
--- a/.github/workflows/black.yml
+++ b/.github/workflows/black.yml
@@ -5,38 +5,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - id: files
-        uses: tj-actions/changed-files@v41.0.0
-        with:
-          files: |
-            cvat-sdk/**/*.py
-            cvat-cli/**/*.py
-            tests/python/**/*.py
-            cvat/apps/quality_control/**/*.py
-            cvat/apps/analytics_report/**/*.py
-          dir_names: true
       - name: Run checks
-        env:
-          PR_FILES_AM: ${{ steps.files.outputs.added_modified }}
-          PR_FILES_RENAMED: ${{ steps.files.outputs.renamed }}
        run: |
-          # If different modules use different Black configs,
-          # we need to run Black for each python component group separately.
-          # Otherwise, they all will use the same config.
+          pipx install $(grep "^black" ./dev/requirements.txt)
-          UPDATED_DIRS="${{steps.files.outputs.all_changed_files}}"
+          echo "Black version: $(black --version)"
-          if [[ ! -z $UPDATED_DIRS ]]; then
-            pipx install $(egrep "black.*" ./cvat-cli/requirements/development.txt)
-
-            echo "Black version: "$(black --version)
-            echo "The dirs will be checked: $UPDATED_DIRS"
-            EXIT_CODE=0
-            for DIR in $UPDATED_DIRS; do
-              black --check --diff $DIR || EXIT_CODE=$(($? | $EXIT_CODE)) || true
-            done
-            exit $EXIT_CODE
-          else
-            echo "No files with the \"py\" extension found"
-          fi
+          black --check --diff .
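The simplified job above is easy to mirror locally before pushing; a minimal sketch, assuming `dev/requirements.txt` pins Black the same way the workflow expects:

```bash
# Install the pinned Black release in an isolated environment, then check
# the whole repository against the single shared configuration.
pipx install $(grep "^black" ./dev/requirements.txt)
black --check --diff .
```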
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index b52deddc3f58..c93361d55975 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -5,7 +5,7 @@ on:
       - 'master'
       - 'develop'
   pull_request:
-    types: [ready_for_review, opened, synchronize, reopened]
+    types: [opened, synchronize, reopened]

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
diff --git a/.github/workflows/finalize-release.yml b/.github/workflows/finalize-release.yml
index 2cb6035769ae..8f19cb1e9e60 100644
--- a/.github/workflows/finalize-release.yml
+++ b/.github/workflows/finalize-release.yml
@@ -65,7 +65,7 @@ jobs:
       - name: Bump version
         run:
-          ./dev/update_version.py --minor
+          ./dev/update_version.py --patch

       - name: Commit post-release changes
         run: |
diff --git a/.github/workflows/full.yml b/.github/workflows/full.yml
index c6369340b5c3..e42380de5ead 100644
--- a/.github/workflows/full.yml
+++ b/.github/workflows/full.yml
@@ -55,7 +55,7 @@ jobs:
           cache-from: type=local,src=/tmp/cvat_cache_server
           context: .
           file: Dockerfile
-          tags: cvat/server
+          tags: cvat/server:${{ env.CVAT_VERSION }}
           outputs: type=docker,dest=/tmp/cvat_server/image.tar

       - name: CVAT UI. Build and push
@@ -64,7 +64,7 @@
           cache-from: type=local,src=/tmp/cvat_cache_ui
           context: .
           file: Dockerfile.ui
-          tags: cvat/ui
+          tags: cvat/ui:${{ env.CVAT_VERSION }}
           outputs: type=docker,dest=/tmp/cvat_ui/image.tar

       - name: CVAT SDK. Build
@@ -102,7 +102,7 @@ jobs:
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.8'
+          python-version: '3.9'

       - name: Download CVAT server image
         uses: actions/download-artifact@v4
@@ -126,8 +126,6 @@ jobs:
         run: |
           docker load --input /tmp/cvat_server/image.tar
           docker load --input /tmp/cvat_ui/image.tar
-          docker tag cvat/server:latest cvat/server:${CVAT_VERSION}
-          docker tag cvat/ui:latest cvat/ui:${CVAT_VERSION}
           docker image ls -a

       - name: Verify API schema
@@ -158,7 +156,7 @@ jobs:
       - name: Install SDK
         run: |
           pip3 install -r ./tests/python/requirements.txt \
-            -e './cvat-sdk[pytorch]' -e ./cvat-cli \
+            -e './cvat-sdk[masks,pytorch]' -e ./cvat-cli \
             --extra-index-url https://download.pytorch.org/whl/cpu

       - name: Running REST API and SDK tests
@@ -203,7 +201,6 @@ jobs:
       - name: Load Docker server image
         run: |
           docker load --input /tmp/cvat_server/image.tar
-          docker tag cvat/server:latest cvat/server:${CVAT_VERSION}
           docker image ls -a

       - name: Running OPA tests
@@ -280,8 +277,6 @@ jobs:
         run: |
           docker load --input /tmp/cvat_server/image.tar
           docker load --input /tmp/cvat_ui/image.tar
-          docker tag cvat/server:latest cvat/server:${CVAT_VERSION}
-          docker tag cvat/ui:latest cvat/ui:${CVAT_VERSION}
           docker image ls -a

       - name: Run CVAT instance
diff --git a/.github/workflows/isort.yml b/.github/workflows/isort.yml
index 19332d917030..bf90604cbb2f 100644
--- a/.github/workflows/isort.yml
+++ b/.github/workflows/isort.yml
@@ -25,7 +25,7 @@ jobs:
           UPDATED_DIRS="${{steps.files.outputs.all_changed_files}}"

           if [[ ! -z $UPDATED_DIRS ]]; then
-            pipx install $(egrep "isort.*" ./cvat-cli/requirements/development.txt)
+            pipx install $(grep "^isort" ./dev/requirements.txt)

             echo "isort version: $(isort --version-number)"
             echo "The dirs will be checked: $UPDATED_DIRS"
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 0c9211b0c4a5..becca0218f94 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -67,7 +67,7 @@ jobs:
           cache-from: type=local,src=/tmp/cvat_cache_server
           context: .
           file: Dockerfile
-          tags: cvat/server
+          tags: cvat/server:${{ env.CVAT_VERSION }}
           outputs: type=docker,dest=/tmp/cvat_server/image.tar

       - name: Instrumentation of the code then rebuilding the CVAT UI
@@ -81,7 +81,7 @@
           cache-from: type=local,src=/tmp/cvat_cache_ui
           context: .
           file: Dockerfile.ui
-          tags: cvat/ui
+          tags: cvat/ui:${{ env.CVAT_VERSION }}
           outputs: type=docker,dest=/tmp/cvat_ui/image.tar

       - name: CVAT SDK. Build
@@ -95,7 +95,7 @@
         id: verify_schema
         run: |
           docker load --input /tmp/cvat_server/image.tar
-          docker run --rm cvat/server bash \
+          docker run --rm "cvat/server:${CVAT_VERSION}" bash \
             -c 'python manage.py spectacular' > cvat/schema-expected.yml
           if ! git diff --no-index cvat/schema.yml cvat/schema-expected.yml; then
@@ -109,7 +109,7 @@ jobs:

       - name: Verify migrations
         run: |
-          docker run --rm cvat/server bash \
+          docker run --rm "cvat/server:${CVAT_VERSION}" bash \
            -c 'python manage.py makemigrations --check'

       - name: Upload CVAT server artifact
@@ -138,7 +138,7 @@
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.8'
+          python-version: '3.9'

       - name: Download CVAT server image
         uses: actions/download-artifact@v4
@@ -156,8 +156,6 @@
         run: |
           docker load --input /tmp/cvat_server/image.tar
           docker load --input /tmp/cvat_ui/image.tar
-          docker tag cvat/server:latest cvat/server:${CVAT_VERSION}
-          docker tag cvat/ui:latest cvat/ui:${CVAT_VERSION}
           docker image ls -a

       - name: Generate SDK
@@ -168,7 +166,7 @@
       - name: Install SDK
         run: |
           pip3 install -r ./tests/python/requirements.txt \
-            -e './cvat-sdk[pytorch]' -e ./cvat-cli \
+            -e './cvat-sdk[masks,pytorch]' -e ./cvat-cli \
             --extra-index-url https://download.pytorch.org/whl/cpu

       - name: Run REST API and SDK tests
@@ -221,7 +219,6 @@
       - name: Load Docker server image
         run: |
           docker load --input /tmp/cvat_server/image.tar
-          docker tag cvat/server:latest cvat/server:${CVAT_VERSION}
           docker image ls -a

       - name: Running OPA tests
@@ -304,8 +301,6 @@
         run: |
           docker load --input /tmp/cvat_server/image.tar
           docker load --input /tmp/cvat_ui/image.tar
-          docker tag cvat/server:latest cvat/server:${CVAT_VERSION}
-          docker tag cvat/ui:latest cvat/ui:${CVAT_VERSION}
           docker image ls -a

       - name: Run CVAT instance
@@ -426,10 +421,10 @@ jobs:
           SERVER_IMAGE_REPO: ${{ secrets.DOCKERHUB_WORKSPACE }}/server
           UI_IMAGE_REPO: ${{ secrets.DOCKERHUB_WORKSPACE }}/ui
         run: |
-          docker tag cvat/server:latest "${SERVER_IMAGE_REPO}:dev"
+          docker tag "cvat/server:${CVAT_VERSION}" "${SERVER_IMAGE_REPO}:dev"
           docker push "${SERVER_IMAGE_REPO}:dev"

-          docker tag cvat/ui:latest "${UI_IMAGE_REPO}:dev"
+          docker tag "cvat/ui:${CVAT_VERSION}" "${UI_IMAGE_REPO}:dev"
           docker push "${UI_IMAGE_REPO}:dev"

   codecov:
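The schema verification step above can be reproduced locally; a sketch assuming the server image has already been built and tagged with the same `CVAT_VERSION` value:

```bash
# Regenerate the OpenAPI schema from the built image and fail if it
# differs from the committed cvat/schema.yml.
docker run --rm "cvat/server:${CVAT_VERSION}" bash \
    -c 'python manage.py spectacular' > cvat/schema-expected.yml
if ! git diff --no-index cvat/schema.yml cvat/schema-expected.yml; then
    echo "The committed API schema is out of date"
    exit 1
fi
```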
diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
index d808a823771f..05237f441988 100644
--- a/.github/workflows/pylint.yml
+++ b/.github/workflows/pylint.yml
@@ -19,11 +19,11 @@ jobs:
           CHANGED_FILES="${{steps.files.outputs.all_changed_files}}"

           if [[ ! -z $CHANGED_FILES ]]; then
-            pipx install $(egrep "^pylint==" ./cvat/requirements/development.txt)
+            pipx install $(grep "^pylint==" ./dev/requirements.txt)

             pipx inject pylint \
-              $(egrep "^pylint-.+==" ./cvat/requirements/development.txt) \
-              $(egrep "^django==" ./cvat/requirements/base.txt)
+              $(grep "^pylint-.\+==" ./dev/requirements.txt) \
+              $(grep "^django==" ./cvat/requirements/base.txt)

             echo "Pylint version: "$(pylint --version | head -1)
             echo "The files will be checked: "$(echo $CHANGED_FILES)
diff --git a/.github/workflows/schedule.yml b/.github/workflows/schedule.yml
index c2071cd85d13..bf74b30df047 100644
--- a/.github/workflows/schedule.yml
+++ b/.github/workflows/schedule.yml
@@ -5,9 +5,8 @@ on:
   workflow_dispatch:

 env:
-  SERVER_IMAGE_TEST_REPO: cvat_server
-  UI_IMAGE_TEST_REPO: instrumentation_cvat_ui
   CYPRESS_VERIFY_TIMEOUT: 180000 # https://docs.cypress.io/guides/guides/command-line#cypress-verify
+  CVAT_VERSION: "local"

 jobs:
   check_updates:
@@ -48,12 +47,6 @@
     steps:
       - uses: actions/checkout@v4

-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_CI_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_CI_TOKEN }}
-
       - name: CVAT server. Getting cache from the default branch
         uses: actions/cache@v4
         with:
@@ -66,34 +59,23 @@
           path: /tmp/cvat_cache_ui
           key: ${{ runner.os }}-build-ui-${{ needs.search_cache.outputs.sha }}

-      - name: CVAT server. Extract metadata (tags, labels) for Docker
-        id: meta-server
-        uses: docker/metadata-action@master
-        with:
-          images: ${{ secrets.DOCKERHUB_CI_WORKSPACE }}/${{ env.SERVER_IMAGE_TEST_REPO }}
-          tags:
-            type=raw,value=nightly
-
-      - name: CVAT UI. Extract metadata (tags, labels) for Docker
-        id: meta-ui
-        uses: docker/metadata-action@master
-        with:
-          images: ${{ secrets.DOCKERHUB_CI_WORKSPACE }}/${{ env.UI_IMAGE_TEST_REPO }}
-          tags:
-            type=raw,value=nightly
-
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

+      - name: Create artifact directories
+        run: |
+          mkdir /tmp/cvat_server
+          mkdir /tmp/cvat_ui
+          mkdir /tmp/cvat_sdk
+
       - name: CVAT server. Build and push
         uses: docker/build-push-action@v6
         with:
           cache-from: type=local,src=/tmp/cvat_cache_server
           context: .
           file: Dockerfile
-          push: true
-          tags: ${{ steps.meta-server.outputs.tags }}
-          labels: ${{ steps.meta-server.outputs.labels }}
+          tags: cvat/server:${{ env.CVAT_VERSION }}
+          outputs: type=docker,dest=/tmp/cvat_server/image.tar

       - name: CVAT UI. Build and push
         uses: docker/build-push-action@v6
@@ -101,9 +83,20 @@
           cache-from: type=local,src=/tmp/cvat_cache_ui
           context: .
           file: Dockerfile.ui
-          push: true
-          tags: ${{ steps.meta-ui.outputs.tags }}
-          labels: ${{ steps.meta-ui.outputs.labels }}
+          tags: cvat/ui:${{ env.CVAT_VERSION }}
+          outputs: type=docker,dest=/tmp/cvat_ui/image.tar
+
+      - name: Upload CVAT server artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: cvat_server
+          path: /tmp/cvat_server/image.tar
+
+      - name: Upload CVAT UI artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: cvat_ui
+          path: /tmp/cvat_ui/image.tar

   unit_testing:
     needs: build
@@ -113,43 +106,25 @@
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.8'
+          python-version: '3.9'

-      - name: Getting CVAT UI cache from the default branch
-        uses: actions/cache@v4
+      - name: Download CVAT server image
+        uses: actions/download-artifact@v4
         with:
-          path: /tmp/cvat_cache_ui
-          key: ${{ runner.os }}-build-ui-${{ needs.search_cache.outputs.sha }}
+          name: cvat_server
+          path: /tmp/cvat_server/

-      - name: Building CVAT UI image
-        uses: docker/build-push-action@v6
+      - name: Download CVAT UI images
+        uses: actions/download-artifact@v4
         with:
-          context: .
-          file: ./Dockerfile.ui
-          cache-from: type=local,src=/tmp/cvat_cache_ui
-          tags: cvat/ui:latest
-          load: true
+          name: cvat_ui
+          path: /tmp/cvat_ui/
-      - name: CVAT server. Extract metadata (tags, labels) for Docker
-        id: meta-server
-        uses: docker/metadata-action@master
-        with:
-          images: ${{ secrets.DOCKERHUB_CI_WORKSPACE }}/${{ env.SERVER_IMAGE_TEST_REPO }}
-          tags:
-            type=raw,value=nightly
-
-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_CI_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_CI_TOKEN }}
-
-      - name: Pull CVAT server image
+      - name: Load Docker images
         run: |
-          docker pull ${{ steps.meta-server.outputs.tags }}
-          docker tag ${{ steps.meta-server.outputs.tags }} cvat/server:local
-          docker tag ${{ steps.meta-server.outputs.tags }} cvat/server:latest
-          docker tag cvat/ui:latest cvat/ui:local
+          docker load --input /tmp/cvat_server/image.tar
+          docker load --input /tmp/cvat_ui/image.tar
+          docker image ls -a

       - name: OPA tests
         run: |
@@ -210,35 +185,23 @@
         with:
           node-version: '16.x'

-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
+      - name: Download CVAT server image
+        uses: actions/download-artifact@v4
         with:
-          username: ${{ secrets.DOCKERHUB_CI_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_CI_TOKEN }}
+          name: cvat_server
+          path: /tmp/cvat_server/

-      - name: CVAT server. Extract metadata (tags, labels) for Docker
-        id: meta-server
-        uses: docker/metadata-action@master
+      - name: Download CVAT UI image
+        uses: actions/download-artifact@v4
         with:
-          images: ${{ secrets.DOCKERHUB_CI_WORKSPACE }}/${{ env.SERVER_IMAGE_TEST_REPO }}
-          tags:
-            type=raw,value=nightly
+          name: cvat_ui
+          path: /tmp/cvat_ui/

-      - name: CVAT UI. Extract metadata (tags, labels) for Docker
-        id: meta-ui
-        uses: docker/metadata-action@master
-        with:
-          images: ${{ secrets.DOCKERHUB_CI_USERNAME }}/${{ env.UI_IMAGE_TEST_REPO }}
-          tags:
-            type=raw,value=nightly
-
-      - name: Pull CVAT UI image
+      - name: Load Docker images
         run: |
-          docker pull ${{ steps.meta-server.outputs.tags }}
-          docker tag ${{ steps.meta-server.outputs.tags }} cvat/server:dev
-
-          docker pull ${{ steps.meta-ui.outputs.tags }}
-          docker tag ${{ steps.meta-ui.outputs.tags }} cvat/ui:dev
+          docker load --input /tmp/cvat_server/image.tar
+          docker load --input /tmp/cvat_ui/image.tar
+          docker image ls -a

       - name: Run CVAT instance
         run: |
diff --git a/.gitignore b/.gitignore
index 9736baa80a3f..c375c7df4e7e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -49,6 +49,8 @@ yarn-error.log*

 # Ignore all the installed packages
 node_modules
+venv/
+.venv/

 # Ignore all js dists
 cvat-data/dist
diff --git a/.vscode/launch.json b/.vscode/launch.json
index 5ed666059a9d..cb4b0f9dcf0f 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -4,6 +4,7 @@
     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
     "version": "0.2.0",
     "configurations": [
+
         {
             "name": "REST API tests: Attach to server",
             "type": "debugpy",
@@ -168,7 +169,7 @@
                 "CVAT_SERVERLESS": "1",
                 "ALLOWED_HOSTS": "*",
                 "DJANGO_LOG_SERVER_HOST": "localhost",
-                "DJANGO_LOG_SERVER_PORT": "8282"
+                "DJANGO_LOG_SERVER_PORT": "8282",
             },
             "args": [
                 "runserver",
@@ -178,7 +179,7 @@
             ],
             "django": true,
             "cwd": "${workspaceFolder}",
-            "console": "internalConsole"
+            "console": "internalConsole",
         },
         {
             "name": "server: chrome",
@@ -360,6 +361,28 @@
             },
             "console": "internalConsole"
         },
+        {
+            "name": "server: RQ - chunks",
+            "type": "debugpy",
+            "request": "launch",
+            "stopOnEntry": false,
+            "justMyCode": false,
+            "python": "${command:python.interpreterPath}",
+            "program": "${workspaceFolder}/manage.py",
+            "args": [
+                "rqworker",
+                "chunks",
+                "--worker-class",
+                "cvat.rqworker.SimpleWorker"
+            ],
+            "django": true,
"${workspaceFolder}", + "env": { + "DJANGO_LOG_SERVER_HOST": "localhost", + "DJANGO_LOG_SERVER_PORT": "8282" + }, + "console": "internalConsole" + }, { "name": "server: migrate", "type": "debugpy", @@ -376,6 +399,22 @@ "env": {}, "console": "internalConsole" }, + { + "name": "server: sync periodic jobs", + "type": "debugpy", + "request": "launch", + "justMyCode": false, + "stopOnEntry": false, + "python": "${command:python.interpreterPath}", + "program": "${workspaceFolder}/manage.py", + "args": [ + "syncperiodicjobs" + ], + "django": true, + "cwd": "${workspaceFolder}", + "env": {}, + "console": "internalConsole" + }, { "name": "server: tests", "type": "debugpy", @@ -405,6 +444,8 @@ "python": "${command:python.interpreterPath}", "module": "pytest", "args": [ + "--verbose", + "--no-cov", // vscode debugger might not work otherwise "tests/python/rest_api/" ], "cwd": "${workspaceFolder}", @@ -537,7 +578,8 @@ "server: RQ - scheduler", "server: RQ - quality reports", "server: RQ - analytics reports", - "server: RQ - cleaning" + "server: RQ - cleaning", + "server: RQ - chunks", ] } ] diff --git a/.vscode/settings.json b/.vscode/settings.json index a0caaf036765..baf7dc5b3879 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -29,6 +29,15 @@ "database": "${workspaceFolder:cvat}/db.sqlite3" } ], + "python.analysis.exclude": [ + // VS Code defaults + "**/node_modules", + "**/__pycache__", + ".git", + + "cvat-cli/build", + "cvat-sdk/build", + ], "python.defaultInterpreterPath": "${workspaceFolder}/.env/", "python.testing.pytestArgs": [ "--rootdir","${workspaceFolder}/tests/" diff --git a/CHANGELOG.md b/CHANGELOG.md index a2cd17ae9868..31a4aae3db7f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,360 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 + +## \[2.24.0\] - 2024-12-20 + +### Added + +- \[CLI\] Added new commands: `project create`, `project delete`, `project ls` + () + +- \[SDK\] You can now use `client.projects.remove_by_ids` to remove multiple + projects + () + +- Support for boolean parameters in annotations actions + () + +### Changed + +- Improved uniformity of validation frames distribution in honeypot tasks and + random honeypot rerolls + () + +- \[CLI\] Switched to a new subcommand hierarchy; now CLI subcommands + have the form `cvat-cli ` + () + +- \[CLI\] The output of the `task create`, `task create-from-backup` and + `project create` commands is now just the created resource ID, + making it machine-readable + () + +- /api/events can now be used to receive events from several sources + () + +### Deprecated + +- \[CLI\] All existing CLI commands of the form `cvat-cli ` + are now deprecated. 
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a2cd17ae9868..31a4aae3db7f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,360 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
+
+## \[2.24.0\] - 2024-12-20
+
+### Added
+
+- \[CLI\] Added new commands: `project create`, `project delete`, `project ls`
+  ()
+
+- \[SDK\] You can now use `client.projects.remove_by_ids` to remove multiple
+  projects
+  ()
+
+- Support for boolean parameters in annotations actions
+  ()
+
+### Changed
+
+- Improved uniformity of validation frames distribution in honeypot tasks and
+  random honeypot rerolls
+  ()
+
+- \[CLI\] Switched to a new subcommand hierarchy; now CLI subcommands
+  have the form `cvat-cli <resource> <action>`
+  ()
+
+- \[CLI\] The output of the `task create`, `task create-from-backup` and
+  `project create` commands is now just the created resource ID,
+  making it machine-readable
+  ()
+
+- /api/events can now be used to receive events from several sources
+  ()
+
+### Deprecated
+
+- \[CLI\] All existing CLI commands of the form `cvat-cli <action>`
+  are now deprecated. Use `cvat-cli task <action>` instead
+  ()
+
+### Removed
+
+- Automatic calculation of quality reports in tasks
+  ()
+
+### Fixed
+
+- Uploading a skeleton template in configurator does not work
+  ()
+
+- Installation of YOLOv7 on GPU
+  ()
+
+- \[Server API\] Significantly improved performance of honeypot changes in tasks
+  ()
+- \[Server API\] `PATCH tasks/id/validation_layout` responses now include correct
+  `disabled_frames` and handle simultaneous updates of
+  `disabled_frames` and honeypot frames correctly
+  ()
+
+- Fixed handling of tracks keyframes from deleted frames on export
+  ()
+
+- Exporting datasets could start significantly later than expected, both for a single
+  user and for several users in the same project/task/job ()
+- Scheduled RQ jobs could not be restarted due to incorrect RQ job status
+  updating and handling ()
+
+
+## \[2.23.1\] - 2024-12-09
+
+### Changed
+
+- \[CLI\] Log messages are now printed on stderr rather than stdout
+  ()
+
+### Fixed
+
+- Optimized memory consumption and reduced the number of database queries
+  when importing annotations to a task with a lot of jobs and images
+  ()
+
+- Incorrect display of validation frames on the task quality management page
+  ()
+
+- Player may navigate to removed frames when playing
+  ()
+
+- User may navigate forward with a keyboard when a modal opened
+  ()
+
+- The fit:canvas event was not generated when fitting the image from the controls sidebar
+  ()
+
+- Color of the 'Create object URL' button for an object not yet saved on the server
+  ()
+
+- Failed request for a chunk inside a job after it was recently modified by updating `validation_layout`
+  ()
+
+- Memory consumption during preparation of image chunks
+  ()
+
+- Possible endless lock acquisition for chunk preparation job
+  ()
+
+- Fixed issue: Cannot read properties of undefined (reading 'getUpdated')
+  ()
+
+
+## \[2.23.0\] - 2024-11-29
+
+### Added
+
+- Support for direct .json file import in Datumaro format
+  ()
+
+- \[SDK, CLI\] Added a `conf_threshold` parameter to
+  `cvat_sdk.auto_annotation.annotate_task`, which is passed as-is to the AA
+  function object via the context. The CLI equivalent is `auto-annotate
+  --conf-threshold`. This makes it easier to write and use AA functions that
+  support object filtering based on confidence levels
+  ()
+
+- \[SDK\] Built-in auto-annotation functions now support object filtering by
+  confidence level
+  ()
+
+- New events (create|update|delete):(membership|webhook) and (create|delete):invitation
+  ()
+
+- \[SDK\] Added new auto-annotation helpers (`mask`, `polygon`, `encode_mask`)
+  to support AA functions that return masks or polygons
+  ()
+
+- \[SDK\] Added a new built-in auto-annotation function,
+  `torchvision_instance_segmentation`
+  ()
+
+- \[SDK, CLI\] Added a new auto-annotation parameter, `conv_mask_to_poly`
+  (`--conv-mask-to-poly` in the CLI)
+  ()
+
+- A user may undo or redo changes made by annotations actions using the general approach (e.g. Ctrl+Z, Ctrl+Y)
+  ()
+
+- Annotations actions now support all kinds of objects (shapes, tracks, tags)
+  ()
+
+- A user may run annotations actions on a certain object (added corresponding object menu item)
+  ()
+
+- A shortcut to open annotations actions modal for a currently selected object
+  ()
+
+- A default role if IAM_TYPE='LDAP' and if the user is not a member of any group in 'DJANGO_AUTH_LDAP_GROUPS' ()
+
+- The `POST /api/lambda/requests` endpoint now has a `conv_mask_to_poly`
+  parameter with the same semantics as the old `convMaskToPoly` parameter
+  ()
+
+- \[SDK\] Model instances can now be pickled
+  ()
+
+### Changed
+
+- Chunks are now prepared in a separate worker process
+  ()
+
+- \[Helm\] Traefik sticky sessions for the backend service are disabled
+  ()
+
+- Payload for events (create|update|delete):(shapes|tags|tracks) does not include frame and attributes anymore
+  ()
+
+### Deprecated
+
+- The `convMaskToPoly` parameter of the `POST /api/lambda/requests` endpoint
+  is deprecated; use `conv_mask_to_poly` instead
+  ()
+
+### Removed
+
+- It is no longer possible to run lambda functions on compressed images;
+  original images will always be used
+  ()
+
+### Fixed
+
+- Export without images in Datumaro format should include image info
+  ()
+
+- Inconsistent zOrder behavior on job open
+  ()
+
+- Ground truth annotations can be shown in standard mode
+  ()
+
+- Keybinds in UI allow drawing disabled shape types
+  ()
+
+- Style issues on the Quality page when browser zoom is applied
+  ()
+- Flickering of masks in review mode, even when no conflicts are highlighted
+  ()
+
+- Fixed security header duplication in HTTP responses from the backend
+  ()
+
+- The error occurs when trying to copy/paste a mask on a video after opening the job
+  ()
+
+- Attributes do not get copied when copy/paste a mask
+  ()
+
+
+## \[2.22.0\] - 2024-11-11
+
+### Added
+
+- Feature to hide a mask during editing ()
+
+- A quality setting to compare point groups without using bbox
+  ()
+
+- A quality check option to consider empty frames matching
+  ()
+
+### Changed
+
+- Reduced memory usage of the utils container
+  ()
+
+### Removed
+
+- Removed unused business group
+  ()
+
+### Fixed
+
+- Propagation creates copies on non-existing frames in a ground truth job
+  ()
+
+- Exporting projects with tasks containing honeypots. Honeypots are no longer exported.
+  ()
+
+- Error after creating GT job on Create job page with frame selection method `random_per_job`
+  ()
+
+- Fixed issue 'Cannot read properties of undefined (reading 'push')'
+  ()
+
+- A renewed import/export request failed immediately if the previous one failed
+  ()
+
+- Fixed automatic zooming in attribute annotation mode for masks
+  ()
+
+- Export dataset in CVAT format misses frames in tasks with non-default frame step
+  ()
+
+- Incorrect progress representation on `Requests` page
+  ()
+
+
+## \[2.21.3\] - 2024-10-31
+
+### Changed
+
+- CLI no longer prints the stack trace in case of HTTP errors
+  ()
+
+### Removed
+
+- Dropped support for Python 3.8 since its EOL was on 2024-10-07
+  ()
+
+### Fixed
+
+- Requests page crash with `Cannot read property 'target' of undefined` error
+  ()
+
+- Tags in ground truth job were displayed as `tag (GT)`
+  ()
+
+- Tags in ground truth job couldn't be deleted via `x` button
+  ()
+
+- Exception 'Canvas is busy' when change frame during drag/resize a track
+  ()
+
+- A shape gets shifted if auto save triggered during dragging
+  ()
+
+
+## \[2.21.2\] - 2024-10-24
+
+### Added
+
+- Access to /analytics can now be granted
+  ()
+
+### Fixed
+
+- Expired sessions are now cleared from the database daily
+  ()
+
+- Fixed export/import errors for tracks with duplicated shapes.
+  Fixed a bug which caused shape duplication on track import.
+  ()
+
+- Fixed Grafana container restart policy
+  ()
+
+- Fixed some interface tooltips having 'undefined' shortcuts
+  ()
+
+- Memory consumption during preparation of image chunks
+  ()
+
+- Fixed a bug where an export RQ job being retried may break scheduling
+  of new jobs
+  ()
+
+- UI now allows the user to start automatic annotation again
+  if the previous request fails
+  ()
+
+
+## \[2.21.1\] - 2024-10-18
+
+### Added
+
+- Keyboard shortcuts for **brush**, **eraser**, **polygon** and **polygon remove** tools on masks drawing toolbox
+  ()
+
+### Fixed
+
+- Ground truth tracks are displayed not only on GT frames in review mode
+  ()
+
+- Incorrect navigation by keyframes when annotation job ends earlier than track in a ground truth job
+  ()
+- Tracks from a ground truth job displayed on wrong frames in review mode when frame step is not equal to 1
+  ()
+
+- Task creation with cloud storage data and GT_POOL validation mode
+  ()
+
+- Incorrect quality reports and immediate feedback with non default start frame or frame step
+  ()
+
+- av context closing issue when using AUTO thread_type
+  ()
+

 ## \[2.21.0\] - 2024-10-10

@@ -237,7 +591,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Deprecated

 - Client events `upload:annotations`, `lock:object`, `change:attribute`, `change:label`
-  ()
+  ()

 ### Removed

@@ -264,7 +618,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   ()
 - Sometimes it is not possible to switch workspace because active control broken after
-trying to create a tag with a shortcut ()
+  trying to create a tag with a shortcut
+  ()


 ## \[2.16.3\] - 2024-08-13

@@ -305,13 +660,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
+  **Asset is already related to another guide**
+  ()

 - Undo can't be done when a shape is rotated
   ()

 - Exporting a skeleton track in a format defined for shapes raises error
-`operands could not be broadcast together with shapes (X, ) (Y, )`
+  `operands could not be broadcast together with shapes (X, ) (Y, )`
   ()

 - Delete label modal window does not have cancellation button
@@ -330,10 +686,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   ()

 - API call to run automatic annotations fails on a model with attributes
-  when mapping not provided in the request ()
+  when mapping not provided in the request
+  ()

 - Fixed a label collision issue where labels with similar prefixes
-and numeric suffixes could conflict, causing error on export.
+  and numeric suffixes could conflict, causing error on export.
   ()

@@ -370,9 +727,9 @@
 ### Added

 - Set of features to track background activities: importing/exporting datasets, annotations or backups, creating tasks.
-Now you may find these processes on Requests page, it allows a user to understand current status of these activities
-and enhances user experience, not losing progress when the browser tab is closed
-()
+  Now you may find these processes on Requests page, it allows a user to understand current status of these activities
+  and enhances user experience, not losing progress when the browser tab is closed
+  ()

 - User now may update a job state from the corresponding task page
   ()

@@ -383,7 +740,8 @@
 ### Changed

 - "Finish the job" button on annotation view now only sets state to 'completed'.
-  The job stage keeps unchanged ()
+  The job stage keeps unchanged
+  ()

 - Log files for individual backend processes are now stored in ephemeral storage
   of each backend container rather than in the `cvat_logs` volume
@@ -395,7 +753,7 @@
 ### Removed

 - Renew the job button in annotation menu was removed
-  ()
+  ()

 ### Fixed

@@ -443,10 +801,12 @@
   ()

 - Exception 'this.el.node.getScreenCTM() is null' occurring in Firefox when
-a user resizes window during skeleton dragging/resizing ()
+  a user resizes window during skeleton dragging/resizing
+  ()

 - Exception 'Edge's nodeFrom M or nodeTo N do not to refer to any node'
-occurring when a user resizes window during skeleton dragging/resizing ()
+  occurring when a user resizes window during skeleton dragging/resizing
+  ()

 - Slightly broken layout when running attributed face detection model
   ()

@@ -504,7 +864,8 @@
   ()

 - When use route `/auth/login-with-token/` without `next` query parameter
-the page reloads infinitely ()
+  the page reloads infinitely
+  ()

 - Fixed kvrocks port naming for istio
   ()

@@ -675,7 +1036,7 @@
 - Opening update CS page sends infinite requests when CS id does not exist
   ()
-Uploading files with TUS immediately failed when one of the requests failed
+  ()
+- Uploading files with TUS immediately failed when one of the requests failed
   ()

 - Longer analytics report calculation because of inefficient requests to analytics db
@@ -845,7 +1206,7 @@
   ()

 - 90 deg-rotated video was added with "Prefer Zip Chunks" disabled
-was warped, fixed using the static cropImage function.
+  was warped, fixed using the static cropImage function.
   ()

@@ -883,7 +1244,7 @@
 ### Added

 - Single shape annotation mode allowing to easily annotate scenarios where a user
-only needs to draw one object on one image ()
+  only needs to draw one object on one image ()

 ### Fixed

@@ -1011,7 +1372,7 @@

 - \[Compose, Helm\] Updated Clickhouse to version 23.11.*

@@ -1060,11 +1421,11 @@ longer accepted automatically. Instead, the invitee can now review the invitation
   ()
 - Error message `Edge's nodeFrom ${dataNodeFrom} or nodeTo ${dataNodeTo} do not to refer to any node`
   when uploading a file with some absent skeleton nodes ()
 - Wrong context menu position in skeleton configurator (Firefox only)
-  ()
+  ()
 - Fixed console error `(Error: attribute width: A negative value is not valid`
-  appearing when skeleton with all outside elements is created ()
+  appearing when skeleton with all outside elements is created ()
 - Updating cloud storage attached to CVAT using Azure connection string
   ()

@@ -1075,7 +1436,7 @@
 ### Added

 - Introduced CVAT actions. Actions allow performing different
-  predefined scenarios on annotations automatically (e.g. shape converters)
+  predefined scenarios on annotations automatically (e.g. shape converters)
   ()

 - The UI will now retry requests that were rejected due to rate limiting
diff --git a/Dockerfile b/Dockerfile
index 8a10a34b771b..00dea1de30d0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -134,6 +134,7 @@ RUN apt-get update && \
         supervisor \
         tzdata \
         unrar \
+        wait-for-it \
     && ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime && \
     dpkg-reconfigure -f noninteractive tzdata && \
     rm -rf /var/lib/apt/lists/* && \
@@ -192,7 +193,7 @@ RUN python -m pip uninstall -y pip
 COPY cvat/nginx.conf /etc/nginx/nginx.conf
 COPY --chown=${USER} components /tmp/components
 COPY --chown=${USER} supervisord/ ${HOME}/supervisord
-COPY --chown=${USER} wait-for-it.sh manage.py backend_entrypoint.sh wait_for_deps.sh ${HOME}/
+COPY --chown=${USER} manage.py backend_entrypoint.sh wait_for_deps.sh ${HOME}/
 COPY --chown=${USER} utils/ ${HOME}/utils
 COPY --chown=${USER} cvat/ ${HOME}/cvat
 COPY --chown=${USER} rqscheduler.py ${HOME}
diff --git a/Dockerfile.ui b/Dockerfile.ui
index 170ee1a76633..f134f5d62883 100644
--- a/Dockerfile.ui
+++ b/Dockerfile.ui
@@ -1,11 +1,5 @@
 FROM node:lts-slim AS cvat-ui

-ARG WA_PAGE_VIEW_HIT
-ARG UI_APP_CONFIG
-ARG CLIENT_PLUGINS
-ARG DISABLE_SOURCE_MAPS
-ARG SOURCE_MAPS_TOKEN
-
 ENV TERM=xterm \
     LANG='C.UTF-8' \
     LC_ALL='C.UTF-8'
@@ -29,6 +23,12 @@ COPY cvat-core/ /tmp/cvat-core/
 COPY cvat-canvas3d/ /tmp/cvat-canvas3d/
 COPY cvat-canvas/ /tmp/cvat-canvas/
 COPY cvat-ui/ /tmp/cvat-ui/
+
+ARG UI_APP_CONFIG
+ARG CLIENT_PLUGINS
+ARG DISABLE_SOURCE_MAPS
+ARG SOURCE_MAPS_TOKEN
+
 RUN CLIENT_PLUGINS="${CLIENT_PLUGINS}" \
     DISABLE_SOURCE_MAPS="${DISABLE_SOURCE_MAPS}" \
     UI_APP_CONFIG="${UI_APP_CONFIG}" \
diff --git a/backend_entrypoint.sh b/backend_entrypoint.sh
index c8b681eabb4d..bac37c76e5be 100755
--- a/backend_entrypoint.sh
+++ b/backend_entrypoint.sh
@@ -8,7 +8,7 @@ fail() {
 }

 wait_for_db() {
-    ~/wait-for-it.sh "${CVAT_POSTGRES_HOST}:${CVAT_POSTGRES_PORT:-5432}" -t 0
+    wait-for-it "${CVAT_POSTGRES_HOST}:${CVAT_POSTGRES_PORT:-5432}" -t 0
 }

 cmd_bash() {
@@ -18,6 +18,9 @@ cmd_bash() {
 cmd_init() {
     wait_for_db
     ~/manage.py migrate
+
+    wait-for-it "${CVAT_REDIS_INMEM_HOST}:${CVAT_REDIS_INMEM_PORT:-6379}" -t 0
+    ~/manage.py syncperiodicjobs
 }

 cmd_run() {
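For context, `wait-for-it` (now installed as a distro package in the Dockerfile above, replacing the bundled `wait-for-it.sh`) blocks until the given TCP endpoint accepts connections; `-t 0` disables the timeout. A minimal sketch of the init ordering the entrypoint now enforces, with illustrative host names:

```bash
# Wait for PostgreSQL indefinitely, then apply migrations.
wait-for-it db:5432 -t 0
~/manage.py migrate

# Periodic job synchronization needs Redis, so wait for it first.
wait-for-it redis:6379 -t 0
~/manage.py syncperiodicjobs
```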
diff --git a/changelog.d/20241009_101726_klakhov_brush_shortcuts.md b/changelog.d/20241009_101726_klakhov_brush_shortcuts.md
deleted file mode 100644
index 8d70aac199be..000000000000
--- a/changelog.d/20241009_101726_klakhov_brush_shortcuts.md
+++ /dev/null
@@ -1,4 +0,0 @@
-### Added
-
-- Keyboard shortcuts for **brush**, **eraser**, **polygon** and **polygon remove** tools on masks drawing toolbox
-  ()
diff --git a/changelog.d/20241011_132931_sekachev.bs_fixed_state_destructurization.md b/changelog.d/20241011_132931_sekachev.bs_fixed_state_destructurization.md
deleted file mode 100644
index 2693c7fb1327..000000000000
--- a/changelog.d/20241011_132931_sekachev.bs_fixed_state_destructurization.md
+++ /dev/null
@@ -1,4 +0,0 @@
-### Fixed
-
-- Ground truth tracks are displayed not only on GT frames in review mode
-  ()
diff --git a/changelog.d/20241011_142625_sekachev.bs_fixed_navigation.md b/changelog.d/20241011_142625_sekachev.bs_fixed_navigation.md
deleted file mode 100644
index 60aededd34b1..000000000000
--- a/changelog.d/20241011_142625_sekachev.bs_fixed_navigation.md
+++ /dev/null
@@ -1,6 +0,0 @@
-### Fixed
-
-- Incorrect navigation by keyframes when annotation job ends earlier than track in a ground truth job
-  ()
-- Tracks from a ground truth job displayed on wrong frames in review mode when frame step is not equal to 1
-  ()
diff --git a/changelog.d/20241016_142620_maria_fix_task_creation_with_gt_pool_and_cs_data.md b/changelog.d/20241016_142620_maria_fix_task_creation_with_gt_pool_and_cs_data.md
deleted file mode 100644
index 8772b7f6713e..000000000000
--- a/changelog.d/20241016_142620_maria_fix_task_creation_with_gt_pool_and_cs_data.md
+++ /dev/null
@@ -1,4 +0,0 @@
-### Fixed
-
-- Task creation with cloud storage data and GT_POOL validation mode
-  ()
diff --git a/changelog.d/20241016_180804_sekachev.bs.md b/changelog.d/20241016_180804_sekachev.bs.md
deleted file mode 100644
index a16bb1a62f55..000000000000
--- a/changelog.d/20241016_180804_sekachev.bs.md
+++ /dev/null
@@ -1,4 +0,0 @@
-### Fixed
-
-- Incorrect quality reports and immediate feedback with non default start frame or frame step
-  ()
diff --git a/cvat-canvas/package.json b/cvat-canvas/package.json
index 2b24ff47e347..c89e7506854c 100644
--- a/cvat-canvas/package.json
+++ b/cvat-canvas/package.json
@@ -1,6 +1,6 @@
 {
     "name": "cvat-canvas",
-    "version": "2.20.9",
+    "version": "2.20.10",
     "type": "module",
     "description": "Part of Computer Vision Annotation Tool which presents its canvas library",
     "main": "src/canvas.ts",
diff --git a/cvat-canvas/src/typescript/canvasModel.ts b/cvat-canvas/src/typescript/canvasModel.ts
index 2c7a1f08d203..0225c738683b 100644
--- a/cvat-canvas/src/typescript/canvasModel.ts
+++ b/cvat-canvas/src/typescript/canvasModel.ts
@@ -96,6 +96,7 @@ export interface Configuration {
     controlPointsSize?: number;
     outlinedBorders?: string | false;
     resetZoom?: boolean;
+    hideEditedObject?: boolean;
 }

 export interface BrushTool {
@@ -416,6 +417,7 @@
             textPosition: consts.DEFAULT_SHAPE_TEXT_POSITION,
             textContent: consts.DEFAULT_SHAPE_TEXT_CONTENT,
             undefinedAttrValue: consts.DEFAULT_UNDEFINED_ATTR_VALUE,
+            hideEditedObject: false,
         },
         imageBitmap: false,
         image: null,
@@ -685,28 +687,34 @@
     public fit(): void {
         const { angle } = this.data;
+        let updatedScale = this.data.scale;
         if ((angle / 90) % 2) {
             // 90, 270, ..
-            this.data.scale = Math.min(
+            updatedScale = Math.min(
                 this.data.canvasSize.width / this.data.imageSize.height,
                 this.data.canvasSize.height / this.data.imageSize.width,
             );
         } else {
-            this.data.scale = Math.min(
+            updatedScale = Math.min(
                 this.data.canvasSize.width / this.data.imageSize.width,
                 this.data.canvasSize.height / this.data.imageSize.height,
             );
         }

-        this.data.scale = Math.min(Math.max(this.data.scale, FrameZoom.MIN), FrameZoom.MAX);
-        this.data.top = this.data.canvasSize.height / 2 - this.data.imageSize.height / 2;
-        this.data.left = this.data.canvasSize.width / 2 - this.data.imageSize.width / 2;
+        updatedScale = Math.min(Math.max(updatedScale, FrameZoom.MIN), FrameZoom.MAX);
+        const updatedTop = this.data.canvasSize.height / 2 - this.data.imageSize.height / 2;
+        const updatedLeft = this.data.canvasSize.width / 2 - this.data.imageSize.width / 2;

-        // scale is changed during zooming or translating
-        // so, remember fitted scale to compute fit-relative scaling
-        this.data.fittedScale = this.data.scale;
+        if (updatedScale !== this.data.scale || updatedTop !== this.data.top || updatedLeft !== this.data.left) {
+            this.data.scale = updatedScale;
+            this.data.top = updatedTop;
+            this.data.left = updatedLeft;

-        this.notify(UpdateReasons.IMAGE_FITTED);
+            // scale is changed during zooming or translating
+            // so, remember fitted scale to compute fit-relative scaling
+            this.data.fittedScale = this.data.scale;
+            this.notify(UpdateReasons.IMAGE_FITTED);
+        }
     }

     public grid(stepX: number, stepY: number): void {
@@ -981,6 +989,10 @@
             this.data.configuration.CSSImageFilter = configuration.CSSImageFilter;
         }

+        if (typeof configuration.hideEditedObject === 'boolean') {
+            this.data.configuration.hideEditedObject = configuration.hideEditedObject;
+        }
+
         this.notify(UpdateReasons.CONFIG_UPDATED);
     }
diff --git a/cvat-canvas/src/typescript/canvasView.ts b/cvat-canvas/src/typescript/canvasView.ts
index 480a5d3aea52..f21255ab4213 100644
--- a/cvat-canvas/src/typescript/canvasView.ts
+++ b/cvat-canvas/src/typescript/canvasView.ts
@@ -245,6 +245,53 @@ export class CanvasViewImpl implements CanvasView, Listener {
         this.canvas.dispatchEvent(event);
     }

+    private resetViewPosition(clientID: number): void {
+        const drawnState = this.drawnStates[clientID];
+        const drawnShape = this.svgShapes[clientID];
+
+        if (drawnState && drawnShape) {
+            const { shapeType, points } = drawnState;
+            const translatedPoints: number[] = this.translateToCanvas(points);
+            const stringified = stringifyPoints(translatedPoints);
+            if (shapeType === 'cuboid') {
+                drawnShape.attr('points', stringified);
+            } else if (['polygon', 'polyline', 'points'].includes(shapeType)) {
+                (drawnShape as SVG.PolyLine | SVG.Polygon).plot(stringified);
+                if (shapeType === 'points') {
+                    this.selectize(false, drawnShape);
+                    this.setupPoints(drawnShape as SVG.PolyLine, drawnState);
+                }
+            } else if (shapeType === 'rectangle') {
+                const [xtl, ytl, xbr, ybr] = translatedPoints;
+                drawnShape.rotate(0);
+                drawnShape.size(xbr - xtl, ybr - ytl).move(xtl, ytl);
+                drawnShape.rotate(drawnState.rotation);
+            } else if (shapeType === 'ellipse') {
+                const [cx, cy, rightX, topY] = translatedPoints;
+                const [rx, ry] = [rightX - cx, cy - topY];
+                drawnShape.rotate(0);
+                drawnShape.size(rx * 2, ry * 2).center(cx, cy);
+                drawnShape.rotate(drawnState.rotation);
+            } else if (shapeType === 'skeleton') {
+                drawnShape.rotate(0);
+                for (const child of (drawnShape as SVG.G).children()) {
+                    if (child.type === 'circle') {
+                        const childClientID = child.attr('data-client-id');
+                        const element = drawnState.elements.find((el: any) => el.clientID === childClientID);
+                        const [x, y] = this.translateToCanvas(element.points);
+                        child.center(x, y);
+                    }
+                }
+                drawnShape.rotate(drawnState.rotation);
+            } else if (shapeType === 'mask') {
+                const [left, top] = points.slice(-4);
+                drawnShape.move(this.geometry.offset + left, this.geometry.offset + top);
+            } else {
+                throw new Error('Not implemented');
+            }
+        }
+    }
+
     private onInteraction = (
         shapes: InteractionResult[] | null,
         shapesUpdated = true,
@@ -1114,6 +1161,7 @@
             }
         }).on('dragend', (e: CustomEvent): void => {
             if (aborted) {
+                this.resetViewPosition(state.clientID);
                 return;
             }

@@ -1172,6 +1220,7 @@
             this.draggableShape = null;
             aborted = true;
             // disable internal drag events of SVG.js
+            // call chain is (mouseup -> SVG.handler.end -> SVG.handler.drag -> dragend)
             window.dispatchEvent(new MouseEvent('mouseup'));
         });
     } else {
@@ -1303,6 +1352,7 @@
         })
         .on('resizedone', (): void => {
             if (aborted) {
+                this.resetViewPosition(state.clientID);
                 return;
             }

@@ -1359,7 +1409,8 @@
             onResizeEnd();
             aborted = true;
             this.resizableShape = null;
-            // disable internal drag events of SVG.js
+            // disable internal resize events of SVG.js
+            // call chain is (mouseup -> SVG.handler.end -> SVG.handler.resize -> resizeend)
             window.dispatchEvent(new MouseEvent('mouseup'));
         });
     } else {
@@ -1600,12 +1651,6 @@
         // Setup event handlers
         this.canvas.addEventListener('dblclick', (e: MouseEvent): void => {
             this.controller.fit();
-            this.canvas.dispatchEvent(
-                new CustomEvent('canvas.fit', {
-                    bubbles: false,
-                    cancelable: true,
-                }),
-            );
             e.preventDefault();
         });

@@ -1845,6 +1890,15 @@
                 }),
             );
         } else if ([UpdateReasons.IMAGE_ZOOMED, UpdateReasons.IMAGE_FITTED].includes(reason)) {
+            if (reason === UpdateReasons.IMAGE_FITTED) {
+                this.canvas.dispatchEvent(
+                    new CustomEvent('canvas.fit', {
+                        bubbles: false,
+                        cancelable: true,
+                    }),
+                );
+            }
+
             this.moveCanvas();
             this.transformCanvas();
         } else if (reason === UpdateReasons.IMAGE_ROTATED) {
@@ -1867,15 +1921,26 @@
             this.gridPattern.setAttribute('height', `${size.height}`);
         } else if (reason === UpdateReasons.SHAPE_FOCUSED) {
             const { padding, clientID } = this.controller.focusData;
+            const drawnState = this.drawnStates[clientID];
             const object = this.svgShapes[clientID];
-            if (object) {
-                const bbox: SVG.BBox = object.bbox();
-                this.onFocusRegion(
-                    bbox.x - padding,
-                    bbox.y - padding,
-                    bbox.width + padding * 2,
-                    bbox.height + padding * 2,
-                );
+            if (drawnState && object) {
+                const { offset } = this.geometry;
+                let [x, y, width, height] = [0, 0, 0, 0];
+
+                if (drawnState.shapeType === 'mask') {
+                    const [xtl, ytl, xbr, ybr] = drawnState.points.slice(-4);
+                    x = xtl + offset;
+                    y = ytl + offset;
+                    width = xbr - xtl + 1;
+                    height = ybr - ytl + 1;
+                } else {
+                    const bbox: SVG.BBox = object.bbox();
+                    ({
+                        x, y, width, height,
+                    } = bbox);
+                }
+
+                this.onFocusRegion(x - padding, y - padding, width + padding * 2, height + padding * 2);
             }
         } else if (reason === UpdateReasons.SHAPE_ACTIVATED) {
             this.activate(this.controller.activeElement);
@@ -2815,6 +2880,9 @@ export class CanvasViewImpl implements CanvasView, Listener {
             const shapeView = window.document.getElementById(`cvat_canvas_shape_${clientID}`);
             if (shapeView) shapeView.classList.remove(this.getHighlightClassname());
         });
+        const redrawMasks = (highlightedElements.elementsIDs.length !== 0 ||
+            this.highlightedElements.elementsIDs.length !== 0);
+
         if (highlightedElements.elementsIDs.length) {
             this.highlightedElements = { ...highlightedElements };
             this.canvas.classList.add('cvat-canvas-highlight-enabled');
@@ -2829,9 +2897,11 @@
             };
             this.canvas.classList.remove('cvat-canvas-highlight-enabled');
         }
-        const masks = Object.values(this.drawnStates).filter((state) => state.shapeType === 'mask');
-        this.deleteObjects(masks);
-        this.addObjects(masks);
+        if (redrawMasks) {
+            const masks = Object.values(this.drawnStates).filter((state) => state.shapeType === 'mask');
+            this.deleteObjects(masks);
+            this.addObjects(masks);
+        }
         if (this.highlightedElements.elementsIDs.length) {
             this.deactivate();
             const clientID = this.highlightedElements.elementsIDs[0];
@@ -3401,7 +3471,7 @@
         return skeleton;
     }

-    private setupPoints(basicPolyline: SVG.PolyLine, state: any): any {
+    private setupPoints(basicPolyline: SVG.PolyLine, state: any | DrawnState): any {
         this.selectize(true, basicPolyline);

         const group: SVG.G = basicPolyline
diff --git a/cvat-canvas/src/typescript/drawHandler.ts b/cvat-canvas/src/typescript/drawHandler.ts
index b7e9cbb90130..77b674dec05e 100644
--- a/cvat-canvas/src/typescript/drawHandler.ts
+++ b/cvat-canvas/src/typescript/drawHandler.ts
@@ -5,7 +5,7 @@

 import * as SVG from 'svg.js';
 import 'svg.draw.js';
-import './svg.patch';
+import { CIRCLE_STROKE } from './svg.patch';

 import { AutoborderHandler } from './autoborderHandler';
 import {
@@ -104,6 +104,7 @@ export class DrawHandlerImpl implements DrawHandler {
     private controlPointsSize: number;
     private selectedShapeOpacity: number;
     private outlinedBorders: string;
+    private isHidden: boolean;

     // we should use any instead of SVG.Shape because svg plugins cannot change declared interface
     // so, methods like draw() just undefined for SVG.Shape, but nevertheless they exist
@@ -1276,6 +1277,7 @@
         this.selectedShapeOpacity = configuration.selectedShapeOpacity;
         this.outlinedBorders = configuration.outlinedBorders || 'black';
         this.autobordersEnabled = false;
+        this.isHidden = false;
         this.startTimestamp = Date.now();
         this.onDrawDoneDefault = onDrawDone;
         this.canvas = canvas;
@@ -1301,10 +1303,28 @@
         });
     }

+    private strokePoint(point: SVG.Element): void {
+        point.attr('stroke', this.isHidden ? 'none' : CIRCLE_STROKE);
+        point.fill({ opacity: this.isHidden ? 0 : 1 });
+    }
+
+    private updateHidden(value: boolean) {
+        this.isHidden = value;
+
+        if (value) {
+            this.canvas.attr('pointer-events', 'none');
+        } else {
+            this.canvas.attr('pointer-events', 'all');
+        }
+    }
+
     public configurate(configuration: Configuration): void {
         this.controlPointsSize = configuration.controlPointsSize;
         this.selectedShapeOpacity = configuration.selectedShapeOpacity;
         this.outlinedBorders = configuration.outlinedBorders || 'black';
+        if (this.isHidden !== configuration.hideEditedObject) {
+            this.updateHidden(configuration.hideEditedObject);
+        }

         const isFillableRect = this.drawData &&
             this.drawData.shapeType === 'rectangle' &&
@@ -1315,15 +1335,26 @@
         const isFilalblePolygon = this.drawData && this.drawData.shapeType === 'polygon';

         if (this.drawInstance && (isFillableRect || isFillableCuboid || isFilalblePolygon)) {
-            this.drawInstance.fill({ opacity: configuration.selectedShapeOpacity });
+            this.drawInstance.fill({
+                opacity: configuration.hideEditedObject ? 0 : configuration.selectedShapeOpacity,
+            });
+        }
+
+        if (this.drawInstance && (isFilalblePolygon)) {
+            const paintHandler = this.drawInstance.remember('_paintHandler');
+            if (paintHandler) {
+                for (const point of (paintHandler as any).set.members) {
+                    this.strokePoint(point);
+                }
+            }
         }

         if (this.drawInstance && this.drawInstance.attr('stroke')) {
-            this.drawInstance.attr('stroke', this.outlinedBorders);
+            this.drawInstance.attr('stroke', configuration.hideEditedObject ? 'none' : this.outlinedBorders);
         }

         if (this.pointsGroup && this.pointsGroup.attr('stroke')) {
-            this.pointsGroup.attr('stroke', this.outlinedBorders);
+            this.pointsGroup.attr('stroke', configuration.hideEditedObject ? 'none' : this.outlinedBorders);
         }

         this.autobordersEnabled = configuration.autoborders;
@@ -1369,6 +1400,7 @@
         const paintHandler = this.drawInstance.remember('_paintHandler');

         for (const point of (paintHandler as any).set.members) {
+            this.strokePoint(point);
             point.attr('stroke-width', `${consts.POINTS_STROKE_WIDTH / geometry.scale}`);
             point.attr('r', `${this.controlPointsSize / geometry.scale}`);
         }
diff --git a/cvat-canvas/src/typescript/editHandler.ts b/cvat-canvas/src/typescript/editHandler.ts
index 567eea29c7de..84ecb1684ad4 100644
--- a/cvat-canvas/src/typescript/editHandler.ts
+++ b/cvat-canvas/src/typescript/editHandler.ts
@@ -472,7 +472,7 @@ export class EditHandlerImpl implements EditHandler {

         const paintHandler = this.editLine.remember('_paintHandler');

-        for (const point of (paintHandler as any).set.members) {
+        for (const point of paintHandler.set.members) {
             point.attr('stroke-width', `${consts.POINTS_STROKE_WIDTH / geometry.scale}`);
             point.attr('r', `${this.controlPointsSize / geometry.scale}`);
         }
diff --git a/cvat-canvas/src/typescript/masksHandler.ts b/cvat-canvas/src/typescript/masksHandler.ts
index cdaa4d86d2fa..7f6a4e313fb3 100644
--- a/cvat-canvas/src/typescript/masksHandler.ts
+++ b/cvat-canvas/src/typescript/masksHandler.ts
@@ -6,7 +6,7 @@
 import { fabric } from 'fabric';
 import debounce from 'lodash/debounce';

 import {
-    DrawData, MasksEditData, Geometry, Configuration, BrushTool, ColorBy,
+    DrawData, MasksEditData, Geometry, Configuration, BrushTool, ColorBy, Position,
 } from './canvasModel';
 import consts from './consts';
 import { DrawHandler } from './drawHandler';
@@ -61,10 +61,11 @@ export class MasksHandlerImpl implements MasksHandler {
     private editData: MasksEditData | null;

     private colorBy: ColorBy;
-    private latestMousePos: { x: number; y: number; };
+    private latestMousePos: Position;

     private startTimestamp: number;
     private geometry: Geometry;
     private drawingOpacity: number;
+    private isHidden: boolean;

     private keepDrawnPolygon(): void {
         const canvasWrapper = this.canvas.getElement().parentElement;
@@ -217,12 +218,29 @@
     private imageDataFromCanvas(wrappingBBox: WrappingBBox): Uint8ClampedArray {
         const imageData = this.canvas.toCanvasElement()
             .getContext('2d').getImageData(
-                wrappingBBox.left, wrappingBBox.top,
-                wrappingBBox.right - wrappingBBox.left + 1, wrappingBBox.bottom - wrappingBBox.top + 1,
+                wrappingBBox.left,
+                wrappingBBox.top,
+                wrappingBBox.right - wrappingBBox.left + 1,
+                wrappingBBox.bottom - wrappingBBox.top + 1,
             ).data;
         return imageData;
     }

+    private updateHidden(value: boolean) {
+        this.isHidden = value;
+
+        // Need to update style of upper canvas explicitly because update of default cursor is not applied immediately
+        // https://github.com/fabricjs/fabric.js/issues/1456
+        const newOpacity = value ? '0' : '';
+        const newCursor = value ? 'inherit' : 'none';
+        this.canvas.getElement().parentElement.style.opacity = newOpacity;
+        const upperCanvas = this.canvas.getElement().parentElement.querySelector('.upper-canvas') as HTMLElement;
+        if (upperCanvas) {
+            upperCanvas.style.cursor = newCursor;
+        }
+        this.canvas.defaultCursor = newCursor;
+    }
+
     private updateBrushTools(brushTool?: BrushTool, opts: Partial<BrushTool> = {}): void {
         if (this.isPolygonDrawing) {
             // tool was switched from polygon to brush for example
@@ -350,6 +368,7 @@
         this.editData = null;
         this.drawingOpacity = 0.5;
         this.brushMarker = null;
+        this.isHidden = false;
         this.colorBy = ColorBy.LABEL;
         this.onDrawDone = onDrawDone;
         this.onDrawRepeat = onDrawRepeat;
@@ -385,6 +404,10 @@
             rle.push(wrappingBbox.left, wrappingBbox.top, wrappingBbox.right, wrappingBbox.bottom);

             this.onDrawDone({
+                occluded: this.drawData.initialState.occluded,
+                attributes: { ...this.drawData.initialState.attributes },
+                color: this.drawData.initialState.color,
+                objectType: this.drawData.initialState.objectType,
                 shapeType: this.drawData.shapeType,
                 points: rle,
                 label: this.drawData.initialState.label,
@@ -452,7 +475,7 @@
             this.canvas.renderAll();
         }

-        if (isMouseDown && !isBrushSizeChanging && ['brush', 'eraser'].includes(tool?.type)) {
+        if (isMouseDown && !this.isHidden && !isBrushSizeChanging && ['brush', 'eraser'].includes(tool?.type)) {
             const color = fabric.Color.fromHex(tool.color);
             color.setAlpha(tool.type === 'eraser' ? 1 : 0.5);
@@ -530,6 +553,10 @@

     public configurate(configuration: Configuration): void {
         this.colorBy = configuration.colorBy;
+
+        if (this.isHidden !== configuration.hideEditedObject) {
+            this.updateHidden(configuration.hideEditedObject);
+        }
     }

     public transform(geometry: Geometry): void {
@@ -563,7 +590,10 @@
             const color = fabric.Color.fromHex(this.getStateColor(drawData.initialState)).getSource();
             const [left, top, right, bottom] = points.slice(-4);
             const imageBitmap = expandChannels(color[0], color[1], color[2], points);
-            imageDataToDataURL(imageBitmap, right - left + 1, bottom - top + 1,
+            imageDataToDataURL(
+                imageBitmap,
+                right - left + 1,
+                bottom - top + 1,
                 (dataURL: string) => new Promise((resolve) => {
                     fabric.Image.fromURL(dataURL, (image: fabric.Image) => {
                         try {
@@ -654,7 +684,10 @@
             const color = fabric.Color.fromHex(this.getStateColor(editData.state)).getSource();
             const [left, top, right, bottom] = points.slice(-4);
             const imageBitmap = expandChannels(color[0], color[1], color[2], points);
-            imageDataToDataURL(imageBitmap, right - left + 1, bottom - top + 1,
+            imageDataToDataURL(
+                imageBitmap,
+                right - left + 1,
+                bottom - top + 1,
                 (dataURL: string) => new Promise((resolve) => {
                     fabric.Image.fromURL(dataURL, (image: fabric.Image) => {
                         try {
diff --git a/cvat-canvas/src/typescript/svg.patch.ts b/cvat-canvas/src/typescript/svg.patch.ts
index 40af155a956f..7b728b274335 100644
--- a/cvat-canvas/src/typescript/svg.patch.ts
+++ b/cvat-canvas/src/typescript/svg.patch.ts
@@ -86,6 +86,7 @@ SVG.Element.prototype.draw.extend(
     }),
 );

+export const CIRCLE_STROKE = '#000';
 // Fix method drawCircles
 function drawCircles(): void {
     const array = this.el.array().valueOf();
@@ -109,6 +110,7 @@
                 .circle(5)
                 .stroke({
                     width: 1,
+                    color: CIRCLE_STROKE,
                 })
                 .fill('#ccc')
                 .center(p.x, p.y),
diff --git a/cvat-cli/README.md b/cvat-cli/README.md
index 71c19b79d908..bbd98c0980c9 100644
--- a/cvat-cli/README.md
+++ b/cvat-cli/README.md
@@ -1,19 +1,26 @@
 # Command-line client for CVAT

-A simple command line interface for working with CVAT tasks. At the moment it
+A simple command line interface for working with CVAT. At the moment it
 implements a basic feature set but may serve as the starting point for a more
 comprehensive CVAT administration tool in the future.

-Overview of functionality:
+The following subcommands are supported:

-- Create a new task (supports name, bug tracker, project, labels JSON, local/share/remote files)
-- Delete tasks (supports deleting a list of task IDs)
-- List all tasks (supports basic CSV or JSON output)
-- Download JPEG frames (supports a list of frame IDs)
-- Dump annotations (supports all formats via format string)
-- Upload annotations for a task in the specified format (e.g. 'YOLO ZIP 1.0')
-- Export and download a whole task
-- Import a task
+- Projects:
+  - `create` - create a new project
+  - `delete` - delete projects
+  - `ls` - list all projects
+
+- Tasks:
+  - `create` - create a new task
+  - `create-from-backup` - create a task from a backup file
+  - `delete` - delete tasks
+  - `ls` - list all tasks
+  - `frames` - download frames from a task
+  - `export-dataset` - export a task as a dataset
+  - `import-dataset` - import annotations into a task from a dataset
+  - `backup` - back up a task
+  - `auto-annotate` - automatically annotate a task using a local function

 ## Installation

@@ -21,29 +28,25 @@

 ## Usage

-```bash
-$ cvat-cli --help
-
-usage: cvat-cli [-h] [--version] [--auth USER:[PASS]]
-                [--server-host SERVER_HOST] [--server-port SERVER_PORT] [--debug]
-                {create,delete,ls,frames,dump,upload,export,import} ...
-
-Perform common operations related to CVAT tasks.
-
-positional arguments:
-  {create,delete,ls,frames,dump,upload,export,import}
-
-optional arguments:
-  -h, --help            show this help message and exit
-  --auth USER:[PASS]    defaults to the current user and supports the PASS
-                        environment variable or password prompt
-                        (default: current user)
-  --server-host SERVER_HOST
-                        host (default: localhost)
-  --server-port SERVER_PORT
-                        port (default: 8080)
-  --debug               show debug output
+The general form of a CLI command is:
+
+```console
+$ cvat-cli <common options> <resource> <action> <options>
+```
+
+where:
+
+- `<common options>` are options shared between all subcommands;
+- `<resource>` is a CVAT resource, such as `task`;
+- `<action>` is the action to do with the resource, such as `create`;
+- `<options>` is any options specific to a particular resource and action.
+
+You can list available subcommands and options using the `--help` option:
+
+```
+$ cvat-cli --help # get help on available common options and resources
+$ cvat-cli <resource> --help # get help on actions for the given resource
+$ cvat-cli <resource> <action> --help # get help on action-specific options
+```

 ## Examples
@@ -51,7 +54,7 @@
 Create a task with local images:

 ```bash
-cvat-cli --auth user create
+cvat-cli --auth user task create
     --labels '[{"name": "car"}, {"name": "person"}]'
     "test_task" "local"
@@ -63,5 +66,5 @@
 List tasks on a custom server with auth:

 ```bash
 cvat-cli --auth admin:password \
     --server-host cvat.my.server.com --server-port 30123 \
-    ls
+    task ls
 ```
diff --git a/cvat-cli/requirements/base.txt b/cvat-cli/requirements/base.txt
index e9be53974d91..94b064e0ace5 100644
--- a/cvat-cli/requirements/base.txt
+++ b/cvat-cli/requirements/base.txt
@@ -1,3 +1,3 @@
-cvat-sdk~=2.22.0
+cvat-sdk~=2.24.1
 Pillow>=10.3.0
 setuptools>=70.0.0 # not directly required, pinned by Snyk to avoid a vulnerability
diff --git a/cvat-cli/requirements/development.txt b/cvat-cli/requirements/development.txt
deleted file mode 100644
index 42a144087213..000000000000
--- a/cvat-cli/requirements/development.txt
+++ /dev/null
@@ -1,5 +0,0 @@
--r base.txt
-
-black>=24.1
-isort>=5.10.1
-pylint>=2.7.0
\ No newline at end of file
diff --git a/cvat-cli/setup.py b/cvat-cli/setup.py
index 454ce2f00956..05b20a9165e1 100644
--- a/cvat-cli/setup.py
+++ b/cvat-cli/setup.py
@@ -56,7 +56,7 @@ def parse_requirements(filename=BASE_REQUIREMENTS_FILE):
         "License :: OSI Approved :: MIT License",
         "Operating System :: OS Independent",
     ],
-    python_requires=">=3.8",
+    python_requires=">=3.9",
     install_requires=BASE_REQUIREMENTS,
     entry_points={
         "console_scripts": [
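The README changes above describe the new `<resource> <action>` hierarchy; a couple of hedged usage sketches (server address and credentials are placeholders from the README examples):

```bash
# List the actions available for the task resource.
cvat-cli task --help

# List projects, then tasks, on a custom server.
cvat-cli --auth admin:password --server-host cvat.my.server.com project ls
cvat-cli --auth admin:password --server-host cvat.my.server.com task ls
```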
index 2448587245f9..c93569182c08 100755 --- a/cvat-cli/src/cvat_cli/__main__.py +++ b/cvat-cli/src/cvat_cli/__main__.py @@ -1,78 +1,37 @@ # Copyright (C) 2020-2022 Intel Corporation -# Copyright (C) 2022 CVAT.ai Corporation +# Copyright (C) 2022-2024 CVAT.ai Corporation # # SPDX-License-Identifier: MIT +import argparse import logging import sys -from http.client import HTTPConnection -from types import SimpleNamespace -from typing import List +import urllib3.exceptions from cvat_sdk import exceptions -from cvat_sdk.core.client import Client, Config -from cvat_cli.cli import CLI -from cvat_cli.parser import get_action_args, make_cmdline_parser +from ._internal.commands_all import COMMANDS +from ._internal.common import build_client, configure_common_arguments, configure_logger +from ._internal.utils import popattr logger = logging.getLogger(__name__) -def configure_logger(level): - formatter = logging.Formatter( - "[%(asctime)s] %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S", style="%" - ) - handler = logging.StreamHandler(sys.stdout) - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.setLevel(level) - if level <= logging.DEBUG: - HTTPConnection.debuglevel = 1 +def main(args: list[str] = None): + parser = argparse.ArgumentParser(description=COMMANDS.description) + configure_common_arguments(parser) + COMMANDS.configure_parser(parser) - -def build_client(parsed_args: SimpleNamespace, logger: logging.Logger) -> Client: - config = Config(verify_ssl=not parsed_args.insecure) - - url = parsed_args.server_host - if parsed_args.server_port: - url += f":{parsed_args.server_port}" - - client = Client( - url=url, - logger=logger, - config=config, - check_server_version=False, # version is checked after auth to support versions < 2.3 - ) - - client.organization_slug = parsed_args.organization - - return client - - -def main(args: List[str] = None): - actions = { - "create": CLI.tasks_create, - "delete": CLI.tasks_delete, - "ls": CLI.tasks_list, - "frames": CLI.tasks_frames, - "dump": CLI.tasks_dump, - "upload": CLI.tasks_upload, - "export": CLI.tasks_export, - "import": CLI.tasks_import, - "auto-annotate": CLI.tasks_auto_annotate, - } - parser = make_cmdline_parser() parsed_args = parser.parse_args(args) - configure_logger(parsed_args.loglevel) - with build_client(parsed_args, logger=logger) as client: - action_args = get_action_args(parser, parsed_args) - try: - cli = CLI(client=client, credentials=parsed_args.auth) - actions[parsed_args.action](cli, **vars(action_args)) - except exceptions.ApiException as e: - logger.critical(e) - return 1 + configure_logger(logger, parsed_args) + + try: + with build_client(parsed_args, logger=logger) as client: + popattr(parsed_args, "_executor")(client, **vars(parsed_args)) + except (exceptions.ApiException, urllib3.exceptions.HTTPError) as e: + logger.critical(e) + return 1 return 0 diff --git a/cvat-cli/src/cvat_cli/_internal/__init__.py b/cvat-cli/src/cvat_cli/_internal/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/cvat-cli/src/cvat_cli/_internal/command_base.py b/cvat-cli/src/cvat_cli/_internal/command_base.py new file mode 100644 index 000000000000..94e13f3f16e9 --- /dev/null +++ b/cvat-cli/src/cvat_cli/_internal/command_base.py @@ -0,0 +1,126 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import argparse +import json +import textwrap +import types +from abc import ABCMeta, abstractmethod +from collections.abc import Mapping, Sequence +from typing import 
Callable, Protocol + +from cvat_sdk import Client + + +class Command(Protocol): + @property + def description(self) -> str: ... + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: ... + + # The exact parameters accepted by `execute` vary between commands, + # so we're forced to declare it like this instead of as a method. + @property + def execute(self) -> Callable[..., None]: ... + + +class CommandGroup: + def __init__(self, *, description: str) -> None: + self._commands: dict[str, Command] = {} + self.description = description + + def command_class(self, name: str): + def decorator(cls: type): + self._commands[name] = cls() + return cls + + return decorator + + def add_command(self, name: str, command: Command) -> None: + self._commands[name] = command + + @property + def commands(self) -> Mapping[str, Command]: + return types.MappingProxyType(self._commands) + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + subparsers = parser.add_subparsers(required=True) + + for name, command in self._commands.items(): + subparser = subparsers.add_parser(name, description=command.description) + subparser.set_defaults(_executor=command.execute) + command.configure_parser(subparser) + + def execute(self) -> None: + # It should be impossible for a command group to be executed, + # because configure_parser requires that a subcommand is specified. + assert False, "unreachable code" + + +class DeprecatedAlias: + def __init__(self, command: Command, replacement: str) -> None: + self._command = command + self._replacement = replacement + + @property + def description(self) -> str: + return textwrap.dedent( + f"""\ + {self._command.description} + (Deprecated; use "{self._replacement}" instead.) + """ + ) + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + self._command.configure_parser(parser) + + def execute(self, client: Client, **kwargs) -> None: + client.logger.warning('This command is deprecated. Use "%s" instead.', self._replacement) + self._command.execute(client, **kwargs) + + +class GenericCommand(metaclass=ABCMeta): + @abstractmethod + def repo(self, client: Client): ... + + @property + @abstractmethod + def resource_type_str(self) -> str: ... + + +class GenericListCommand(GenericCommand): + @property + def description(self) -> str: + return f"List all CVAT {self.resource_type_str}s in either basic or JSON format." + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "--json", + dest="use_json_output", + default=False, + action="store_true", + help="output JSON data", + ) + + def execute(self, client: Client, *, use_json_output: bool = False): + results = self.repo(client).list(return_json=use_json_output) + if use_json_output: + print(json.dumps(json.loads(results), indent=2)) + else: + for r in results: + print(r.id) + + +class GenericDeleteCommand(GenericCommand): + @property + def description(self): + return f"Delete a list of {self.resource_type_str}s, ignoring those which don't exist." 
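To make the dispatch wiring above easier to follow: `CommandGroup.configure_parser` stashes each command's bound `execute` on the parsed namespace via `set_defaults(_executor=...)`, and the entry point pops it back off before calling it. A minimal, self-contained sketch of the same pattern (all names here are illustrative, not part of the CVAT CLI):

```python
import argparse

class HelloCommand:
    description = "Print a greeting."

    def configure_parser(self, parser: argparse.ArgumentParser) -> None:
        parser.add_argument("name", help="who to greet")

    def execute(self, client, *, name: str) -> None:
        print(f"Hello, {name}!")

parser = argparse.ArgumentParser(description="demo")
subparsers = parser.add_subparsers(required=True)
cmd = HelloCommand()
sub = subparsers.add_parser("hello", description=cmd.description)
sub.set_defaults(_executor=cmd.execute)  # same trick as CommandGroup.configure_parser
cmd.configure_parser(sub)

args = parser.parse_args(["hello", "world"])
executor = args._executor
delattr(args, "_executor")   # mirrors the popattr() helper: strip bookkeeping first
executor(None, **vars(args))  # prints "Hello, world!"
```

The remaining namespace entries become keyword arguments of `execute`, which is why each command's `execute` signature mirrors exactly the arguments its parser declares.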
+ + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "ids", type=int, help=f"list of {self.resource_type_str} IDs", nargs="+" + ) + + def execute(self, client: Client, *, ids: Sequence[int]) -> None: + self.repo(client).remove_by_ids(ids) diff --git a/cvat-cli/src/cvat_cli/_internal/commands_all.py b/cvat-cli/src/cvat_cli/_internal/commands_all.py new file mode 100644 index 000000000000..758d6b1d05e8 --- /dev/null +++ b/cvat-cli/src/cvat_cli/_internal/commands_all.py @@ -0,0 +1,27 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +from .command_base import CommandGroup, DeprecatedAlias +from .commands_projects import COMMANDS as COMMANDS_PROJECTS +from .commands_tasks import COMMANDS as COMMANDS_TASKS + +COMMANDS = CommandGroup(description="Perform operations on CVAT resources.") + +COMMANDS.add_command("project", COMMANDS_PROJECTS) +COMMANDS.add_command("task", COMMANDS_TASKS) + +_legacy_mapping = { + "create": "create", + "ls": "ls", + "delete": "delete", + "frames": "frames", + "dump": "export-dataset", + "upload": "import-dataset", + "export": "backup", + "import": "create-from-backup", + "auto-annotate": "auto-annotate", +} + +for _legacy, _new in _legacy_mapping.items(): + COMMANDS.add_command(_legacy, DeprecatedAlias(COMMANDS_TASKS.commands[_new], f"task {_new}")) diff --git a/cvat-cli/src/cvat_cli/_internal/commands_projects.py b/cvat-cli/src/cvat_cli/_internal/commands_projects.py new file mode 100644 index 000000000000..b6c39eeef434 --- /dev/null +++ b/cvat-cli/src/cvat_cli/_internal/commands_projects.py @@ -0,0 +1,91 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import argparse +import textwrap + +from cvat_sdk import Client, models + +from .command_base import CommandGroup, GenericCommand, GenericDeleteCommand, GenericListCommand +from .parsers import parse_label_arg + +COMMANDS = CommandGroup(description="Perform operations on CVAT projects.") + + +class GenericProjectCommand(GenericCommand): + resource_type_str = "project" + + def repo(self, client: Client): + return client.projects + + +@COMMANDS.command_class("ls") +class ProjectList(GenericListCommand, GenericProjectCommand): + pass + + +@COMMANDS.command_class("create") +class ProjectCreate: + description = textwrap.dedent( + """\ + Create a new CVAT project, optionally importing a dataset. 
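For orientation, the `create` subcommand described here reduces to a single SDK call; a hedged sketch of the equivalent direct usage (the host, port, credentials, and file names are illustrative assumptions, not part of this diff):

```python
from cvat_sdk import make_client, models

# Illustrative connection values; assumes a reachable CVAT server.
with make_client(host="localhost", port=8080, credentials=("user", "pass")) as client:
    project = client.projects.create_from_dataset(
        spec=models.ProjectWriteRequest(
            name="demo project",
            labels=[{"name": "car"}, {"name": "person"}],
        ),
        dataset_path="dataset.zip",   # the CLI passes "" when no dataset is imported
        dataset_format="CVAT 1.1",
        status_check_period=2,
    )
    print(project.id)
```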
+ """ + ) + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument("name", type=str, help="name of the project") + parser.add_argument( + "--bug_tracker", "--bug", default=argparse.SUPPRESS, type=str, help="bug tracker URL" + ) + parser.add_argument( + "--labels", + default=[], + type=parse_label_arg, + help="string or file containing JSON labels specification (default: %(default)s)", + ) + parser.add_argument( + "--dataset_path", + default="", + type=str, + help="path to the dataset file to import", + ) + parser.add_argument( + "--dataset_format", + default="CVAT 1.1", + type=str, + help="format of the dataset file being uploaded" + " (only applies when --dataset_path is specified; default: %(default)s)", + ) + parser.add_argument( + "--completion_verification_period", + dest="status_check_period", + default=2, + type=float, + help="period between status checks" + " (only applies when --dataset_path is specified; default: %(default)s)", + ) + + def execute( + self, + client: Client, + *, + name: str, + labels: dict, + dataset_path: str, + dataset_format: str, + status_check_period: int, + **kwargs, + ) -> None: + project = client.projects.create_from_dataset( + spec=models.ProjectWriteRequest(name=name, labels=labels, **kwargs), + dataset_path=dataset_path, + dataset_format=dataset_format, + status_check_period=status_check_period, + ) + print(project.id) + + +@COMMANDS.command_class("delete") +class ProjectDelete(GenericDeleteCommand, GenericProjectCommand): + pass diff --git a/cvat-cli/src/cvat_cli/_internal/commands_tasks.py b/cvat-cli/src/cvat_cli/_internal/commands_tasks.py new file mode 100644 index 000000000000..8c6782887d97 --- /dev/null +++ b/cvat-cli/src/cvat_cli/_internal/commands_tasks.py @@ -0,0 +1,507 @@ +# Copyright (C) 2022-2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +from __future__ import annotations + +import argparse +import importlib +import importlib.util +import textwrap +from collections.abc import Sequence +from pathlib import Path +from typing import Any, Optional + +import cvat_sdk.auto_annotation as cvataa +from attr.converters import to_bool +from cvat_sdk import Client, models +from cvat_sdk.core.helpers import DeferredTqdmProgressReporter +from cvat_sdk.core.proxies.tasks import ResourceType + +from .command_base import CommandGroup, GenericCommand, GenericDeleteCommand, GenericListCommand +from .parsers import ( + BuildDictAction, + parse_function_parameter, + parse_label_arg, + parse_resource_type, + parse_threshold, +) + +COMMANDS = CommandGroup(description="Perform operations on CVAT tasks.") + + +class GenericTaskCommand(GenericCommand): + resource_type_str = "task" + + def repo(self, client: Client): + return client.tasks + + +@COMMANDS.command_class("ls") +class TaskList(GenericListCommand, GenericTaskCommand): + pass + + +@COMMANDS.command_class("create") +class TaskCreate: + description = textwrap.dedent( + """\ + Create a new CVAT task. To create a task, you need + to specify labels using the --labels argument or + attach the task to an existing project using the + --project_id argument. 
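As a point of reference for the `--labels` option mentioned above: `parse_label_arg` accepts either inline JSON or a path to a JSON file. A small illustrative specification follows; the `attributes` fields shown track the usual CVAT label schema, but treat the exact field set as an assumption:

```python
import json

# Labels specification accepted by --labels: a JSON list of label objects.
labels = [
    {"name": "car"},
    {
        "name": "person",
        "attributes": [
            {
                "name": "occluded",
                "mutable": True,
                "input_type": "checkbox",
                "default_value": "false",
                "values": ["false", "true"],
            }
        ],
    },
]

with open("labels.json", "w") as f:
    json.dump(labels, f)  # then: cvat-cli ... task create --labels labels.json ...
```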
+ """ ) + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument("name", type=str, help="name of the task") + parser.add_argument( + "resource_type", + default="local", + choices=list(ResourceType), + type=parse_resource_type, + help="type of files specified", + ) + parser.add_argument("resources", type=str, help="list of paths or URLs", nargs="+") + parser.add_argument( + "--annotation_path", default="", type=str, help="path to annotation file" + ) + parser.add_argument( + "--annotation_format", + default="CVAT 1.1", + type=str, + help="format of the annotation file being uploaded, e.g. CVAT 1.1", + ) + parser.add_argument( + "--bug_tracker", "--bug", default=argparse.SUPPRESS, type=str, help="bug tracker URL" + ) + parser.add_argument( + "--chunk_size", + default=argparse.SUPPRESS, + type=int, + help="the number of frames per chunk", + ) + parser.add_argument( + "--completion_verification_period", + dest="status_check_period", + default=2, + type=float, + help=textwrap.dedent( + """\ + number of seconds to wait until checking + if data compression finished (necessary before uploading annotations) + """ + ), + ) + parser.add_argument( + "--copy_data", + default=False, + action="store_true", + help=textwrap.dedent( + """\ + set the option to copy the data, only used when resource type is + share (default: %(default)s) + """ + ), + ) + parser.add_argument( + "--frame_step", + default=argparse.SUPPRESS, + type=int, + help=textwrap.dedent( + """\ + set the frame step option in the advanced configuration + when uploading image series or videos + """ + ), + ) + parser.add_argument( + "--image_quality", + default=70, + type=int, + help=textwrap.dedent( + """\ + set the image quality option in the advanced configuration + when creating tasks (default: %(default)s) + """ + ), + ) + parser.add_argument( + "--labels", + default="[]", + type=parse_label_arg, + help="string or file containing JSON labels specification", + ) + parser.add_argument( + "--project_id", default=argparse.SUPPRESS, type=int, help="project ID if project exists" + ) + parser.add_argument( + "--overlap", + default=argparse.SUPPRESS, + type=int, + help="the number of intersected frames between different segments", + ) + parser.add_argument( + "--segment_size", + default=argparse.SUPPRESS, + type=int, + help="the number of frames in a segment", + ) + parser.add_argument( + "--sorting-method", + default="lexicographical", + choices=["lexicographical", "natural", "predefined", "random"], + help="""data sorting method (default: %(default)s)""", + ) + parser.add_argument( + "--start_frame", + default=argparse.SUPPRESS, + type=int, + help="the start frame of the video", + ) + parser.add_argument( + "--stop_frame", default=argparse.SUPPRESS, type=int, help="the stop frame of the video" + ) + parser.add_argument( + "--use_cache", + action="store_true", + help="""use cache""", # automatically sets default=False + ) + parser.add_argument( + "--use_zip_chunks", + action="store_true", # automatically sets default=False + help="""zip chunks before sending them to the server""", + ) + parser.add_argument( + "--cloud_storage_id", + default=argparse.SUPPRESS, + type=int, + help="cloud storage ID if you would like to use data from cloud storage", + ) + parser.add_argument( + "--filename_pattern", + default=argparse.SUPPRESS, + type=str, + help=textwrap.dedent( + """\ + pattern for filtering data from the manifest file for the upload. + Only shell-style wildcards are supported: + * - matches everything; + ?
- matches any single character; + [seq] - matches any character in 'seq'; + [!seq] - matches any character not in seq + """ + ), + ) + + def execute( + self, + client, + *, + name: str, + labels: list[dict[str, str]], + resources: Sequence[str], + resource_type: ResourceType, + annotation_path: str, + annotation_format: str, + status_check_period: int, + **kwargs, + ) -> None: + task_params = {} + data_params = {} + + for k, v in kwargs.items(): + if k in models.DataRequest.attribute_map or k == "frame_step": + data_params[k] = v + else: + task_params[k] = v + + task = client.tasks.create_from_data( + spec=models.TaskWriteRequest(name=name, labels=labels, **task_params), + resource_type=resource_type, + resources=resources, + data_params=data_params, + annotation_path=annotation_path, + annotation_format=annotation_format, + status_check_period=status_check_period, + pbar=DeferredTqdmProgressReporter(), + ) + print(task.id) + + +@COMMANDS.command_class("delete") +class TaskDelete(GenericDeleteCommand, GenericTaskCommand): + pass + + +@COMMANDS.command_class("frames") +class TaskFrames: + description = textwrap.dedent( + """\ + Download the requested frame numbers for a task and save images as + task_<task_id>_frame_<frame_id>.jpg. + """ + ) + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument("task_id", type=int, help="task ID") + parser.add_argument("frame_ids", type=int, help="list of frame IDs to download", nargs="+") + parser.add_argument( + "--outdir", type=str, default="", help="directory to save images (default: CWD)" + ) + parser.add_argument( + "--quality", + type=str, + choices=("original", "compressed"), + default="original", + help="choose quality of images (default: %(default)s)", + ) + + def execute( + self, + client: Client, + *, + task_id: int, + frame_ids: Sequence[int], + outdir: str, + quality: str, + ) -> None: + client.tasks.retrieve(obj_id=task_id).download_frames( + frame_ids=frame_ids, + outdir=outdir, + quality=quality, + filename_pattern=f"task_{task_id}" + "_frame_{frame_id:06d}{frame_ext}", + ) + + +@COMMANDS.command_class("export-dataset") +class TaskExportDataset: + description = textwrap.dedent( + """\ + Export a task as a dataset in the specified format (e.g. 'YOLO 1.1').
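Like the other task commands, this one wraps a single SDK call; a minimal hedged sketch of the direct equivalent (connection details and the task ID are illustrative):

```python
from cvat_sdk import make_client
from cvat_sdk.core.helpers import DeferredTqdmProgressReporter

# Illustrative connection values; task 42 is assumed to exist on the server.
with make_client(host="localhost", port=8080, credentials=("user", "pass")) as client:
    client.tasks.retrieve(obj_id=42).export_dataset(
        format_name="CVAT for images 1.1",
        filename="dataset.zip",
        include_images=False,  # set True to bundle the images as well
        pbar=DeferredTqdmProgressReporter(),
    )
```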
+ """ + ) + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument("task_id", type=int, help="task ID") + parser.add_argument("filename", type=str, help="output file") + parser.add_argument( + "--format", + dest="fileformat", + type=str, + default="CVAT for images 1.1", + help="annotation format (default: %(default)s)", + ) + parser.add_argument( + "--completion_verification_period", + dest="status_check_period", + default=2, + type=float, + help="number of seconds to wait until checking if dataset building finished", + ) + parser.add_argument( + "--with-images", + type=to_bool, + default=False, + dest="include_images", + help="Whether to include images or not (default: %(default)s)", + ) + + def execute( + self, + client: Client, + *, + task_id: int, + fileformat: str, + filename: str, + status_check_period: int, + include_images: bool, + ) -> None: + client.tasks.retrieve(obj_id=task_id).export_dataset( + format_name=fileformat, + filename=filename, + pbar=DeferredTqdmProgressReporter(), + status_check_period=status_check_period, + include_images=include_images, + ) + + +@COMMANDS.command_class("import-dataset") +class TaskImportDataset: + description = textwrap.dedent( + """\ + Import annotations into a task from a dataset in the specified format + (e.g. 'YOLO 1.1'). + """ + ) + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument("task_id", type=int, help="task ID") + parser.add_argument("filename", type=str, help="upload file") + parser.add_argument( + "--format", + dest="fileformat", + type=str, + default="CVAT 1.1", + help="annotation format (default: %(default)s)", + ) + + def execute( + self, + client: Client, + *, + task_id: int, + fileformat: str, + filename: str, + ) -> None: + client.tasks.retrieve(obj_id=task_id).import_annotations( + format_name=fileformat, + filename=filename, + pbar=DeferredTqdmProgressReporter(), + ) + + +@COMMANDS.command_class("backup") +class TaskBackup: + description = """Download a task backup.""" + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument("task_id", type=int, help="task ID") + parser.add_argument("filename", type=str, help="output file") + parser.add_argument( + "--completion_verification_period", + dest="status_check_period", + default=2, + type=float, + help="time interval between checks if archive building has been finished, in seconds", + ) + + def execute( + self, client: Client, *, task_id: int, filename: str, status_check_period: int + ) -> None: + client.tasks.retrieve(obj_id=task_id).download_backup( + filename=filename, + status_check_period=status_check_period, + pbar=DeferredTqdmProgressReporter(), + ) + + +@COMMANDS.command_class("create-from-backup") +class TaskCreateFromBackup: + description = """Create a task from a backup file.""" + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument("filename", type=str, help="upload file") + parser.add_argument( + "--completion_verification_period", + dest="status_check_period", + default=2, + type=float, + help="time interval between checks if archive processing was finished, in seconds", + ) + + def execute(self, client: Client, *, filename: str, status_check_period: int) -> None: + task = client.tasks.create_from_backup( + filename=filename, + status_check_period=status_check_period, + pbar=DeferredTqdmProgressReporter(), + ) + print(task.id) + + +@COMMANDS.command_class("auto-annotate") +class TaskAutoAnnotate: + description 
= "Automatically annotate a CVAT task by running a function on the local machine." + + def configure_parser(self, parser: argparse.ArgumentParser) -> None: + parser.add_argument("task_id", type=int, help="task ID") + + function_group = parser.add_mutually_exclusive_group(required=True) + + function_group.add_argument( + "--function-module", + metavar="MODULE", + help="qualified name of a module to use as the function", + ) + + function_group.add_argument( + "--function-file", + metavar="PATH", + type=Path, + help="path to a Python source file to use as the function", + ) + + parser.add_argument( + "--function-parameter", + "-p", + metavar="NAME=TYPE:VALUE", + type=parse_function_parameter, + action=BuildDictAction, + dest="function_parameters", + help="parameter for the function", + ) + + parser.add_argument( + "--clear-existing", + action="store_true", + help="Remove existing annotations from the task", + ) + + parser.add_argument( + "--allow-unmatched-labels", + action="store_true", + help="Allow the function to declare labels not configured in the task", + ) + + parser.add_argument( + "--conf-threshold", + type=parse_threshold, + help="Confidence threshold for filtering detections", + default=None, + ) + + parser.add_argument( + "--conv-mask-to-poly", + action="store_true", + help="Convert mask shapes to polygon shapes", + ) + + def execute( + self, + client: Client, + *, + task_id: int, + function_module: Optional[str] = None, + function_file: Optional[Path] = None, + function_parameters: dict[str, Any], + clear_existing: bool = False, + allow_unmatched_labels: bool = False, + conf_threshold: Optional[float], + conv_mask_to_poly: bool, + ) -> None: + if function_module is not None: + function = importlib.import_module(function_module) + elif function_file is not None: + module_spec = importlib.util.spec_from_file_location("__cvat_function__", function_file) + function = importlib.util.module_from_spec(module_spec) + module_spec.loader.exec_module(function) + else: + assert False, "function identification arguments missing" + + if hasattr(function, "create"): + # this is actually a function factory + function = function.create(**function_parameters) + else: + if function_parameters: + raise TypeError("function takes no parameters") + + cvataa.annotate_task( + client, + task_id, + function, + pbar=DeferredTqdmProgressReporter(), + clear_existing=clear_existing, + allow_unmatched_labels=allow_unmatched_labels, + conf_threshold=conf_threshold, + conv_mask_to_poly=conv_mask_to_poly, + ) diff --git a/cvat-cli/src/cvat_cli/_internal/common.py b/cvat-cli/src/cvat_cli/_internal/common.py new file mode 100644 index 000000000000..6f37e3d74eaa --- /dev/null +++ b/cvat-cli/src/cvat_cli/_internal/common.py @@ -0,0 +1,104 @@ +# Copyright (C) 2021-2022 Intel Corporation +# Copyright (C) 2022-2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import argparse +import getpass +import logging +import os +import sys +from http.client import HTTPConnection + +from cvat_sdk.core.client import Client, Config + +from ..version import VERSION +from .utils import popattr + + +def get_auth(s): + """Parse USER[:PASS] strings and prompt for password if none was + supplied.""" + user, _, password = s.partition(":") + password = password or os.environ.get("PASS") or getpass.getpass() + return user, password + + +def configure_common_arguments(parser: argparse.ArgumentParser) -> None: + parser.add_argument("--version", action="version", version=VERSION) + parser.add_argument( + "--insecure", + 
action="store_true", + help="Allows to disable SSL certificate check", + ) + + parser.add_argument( + "--auth", + type=get_auth, + metavar="USER:[PASS]", + default=getpass.getuser(), + help="""defaults to the current user and supports the PASS + environment variable or password prompt + (default user: %(default)s).""", + ) + parser.add_argument( + "--server-host", type=str, default="localhost", help="host (default: %(default)s)" + ) + parser.add_argument( + "--server-port", + type=int, + default=None, + help="port (default: 80 for http and 443 for https connections)", + ) + parser.add_argument( + "--organization", + "--org", + metavar="SLUG", + help="""short name (slug) of the organization + to use when listing or creating resources; + set to blank string to use the personal workspace + (default: list all accessible objects, create in personal workspace)""", + ) + parser.add_argument( + "--debug", + action="store_const", + dest="loglevel", + const=logging.DEBUG, + default=logging.INFO, + help="show debug output", + ) + + +def configure_logger(logger: logging.Logger, parsed_args: argparse.Namespace) -> None: + level = popattr(parsed_args, "loglevel") + formatter = logging.Formatter( + "[%(asctime)s] %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S", style="%" + ) + handler = logging.StreamHandler(sys.stderr) + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.setLevel(level) + if level <= logging.DEBUG: + HTTPConnection.debuglevel = 1 + + +def build_client(parsed_args: argparse.Namespace, logger: logging.Logger) -> Client: + config = Config(verify_ssl=not popattr(parsed_args, "insecure")) + + url = popattr(parsed_args, "server_host") + if server_port := popattr(parsed_args, "server_port"): + url += f":{server_port}" + + client = Client( + url=url, + logger=logger, + config=config, + check_server_version=False, # version is checked after auth to support versions < 2.3 + ) + + client.login(popattr(parsed_args, "auth")) + client.check_server_version(fail_if_unsupported=False) + + client.organization_slug = popattr(parsed_args, "organization") + + return client diff --git a/cvat-cli/src/cvat_cli/_internal/parsers.py b/cvat-cli/src/cvat_cli/_internal/parsers.py new file mode 100644 index 000000000000..97dcb5b2668a --- /dev/null +++ b/cvat-cli/src/cvat_cli/_internal/parsers.py @@ -0,0 +1,73 @@ +# Copyright (C) 2021-2022 Intel Corporation +# Copyright (C) 2022-2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import argparse +import json +import os.path +from typing import Any + +from attr.converters import to_bool +from cvat_sdk.core.proxies.tasks import ResourceType + + +def parse_resource_type(s: str) -> ResourceType: + try: + return ResourceType[s.upper()] + except KeyError: + return s + + +def parse_label_arg(s): + """If s is a file load it as JSON, otherwise parse s as JSON.""" + if os.path.exists(s): + with open(s, "r") as fp: + return json.load(fp) + else: + return json.loads(s) + + +def parse_function_parameter(s: str) -> tuple[str, Any]: + key, sep, type_and_value = s.partition("=") + + if not sep: + raise argparse.ArgumentTypeError("parameter value not specified") + + type_, sep, value = type_and_value.partition(":") + + if not sep: + raise argparse.ArgumentTypeError("parameter type not specified") + + if type_ == "int": + value = int(value) + elif type_ == "float": + value = float(value) + elif type_ == "str": + pass + elif type_ == "bool": + value = to_bool(value) + else: + raise argparse.ArgumentTypeError(f"unsupported parameter type 
{type_!r}") + + return (key, value) + + +def parse_threshold(s: str) -> float: + try: + value = float(s) + except ValueError as e: + raise argparse.ArgumentTypeError("must be a number") from e + + if not 0 <= value <= 1: + raise argparse.ArgumentTypeError("must be between 0 and 1") + return value + + +class BuildDictAction(argparse.Action): + def __init__(self, option_strings, dest, default=None, **kwargs): + super().__init__(option_strings, dest, default=default or {}, **kwargs) + + def __call__(self, parser, namespace, values, option_string=None): + key, value = values + getattr(namespace, self.dest)[key] = value diff --git a/cvat-cli/src/cvat_cli/_internal/utils.py b/cvat-cli/src/cvat_cli/_internal/utils.py new file mode 100644 index 000000000000..b541534790c4 --- /dev/null +++ b/cvat-cli/src/cvat_cli/_internal/utils.py @@ -0,0 +1,9 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + + +def popattr(obj, name): + value = getattr(obj, name) + delattr(obj, name) + return value diff --git a/cvat-cli/src/cvat_cli/cli.py b/cvat-cli/src/cvat_cli/cli.py deleted file mode 100644 index e7945b18bb2e..000000000000 --- a/cvat-cli/src/cvat_cli/cli.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (C) 2022 CVAT.ai Corporation -# -# SPDX-License-Identifier: MIT - -from __future__ import annotations - -import importlib -import importlib.util -import json -from pathlib import Path -from typing import Any, Dict, List, Optional, Sequence, Tuple - -import cvat_sdk.auto_annotation as cvataa -from cvat_sdk import Client, models -from cvat_sdk.core.helpers import DeferredTqdmProgressReporter -from cvat_sdk.core.proxies.tasks import ResourceType - - -class CLI: - def __init__(self, client: Client, credentials: Tuple[str, str]): - self.client = client - - self.client.login(credentials) - - self.client.check_server_version(fail_if_unsupported=False) - - def tasks_list(self, *, use_json_output: bool = False, **kwargs): - """List all tasks in either basic or JSON format.""" - results = self.client.tasks.list(return_json=use_json_output, **kwargs) - if use_json_output: - print(json.dumps(json.loads(results), indent=2)) - else: - for r in results: - print(r.id) - - def tasks_create( - self, - name: str, - labels: List[Dict[str, str]], - resources: Sequence[str], - *, - resource_type: ResourceType = ResourceType.LOCAL, - annotation_path: str = "", - annotation_format: str = "CVAT XML 1.1", - status_check_period: int = 2, - **kwargs, - ) -> None: - """ - Create a new task with the given name and labels JSON and add the files to it. 
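For clarity on the `-p NAME=TYPE:VALUE` syntax handled by `parse_function_parameter` above: the string is split on the first `=` and then on the first `:`, and the value is converted according to the named type. A standalone, simplified rework of that partition logic (the `bool` converter is omitted here for brevity):

```python
# Reworked from parse_function_parameter above; runnable on its own.
def parse_param(s: str):
    key, sep, rest = s.partition("=")
    assert sep, "parameter value not specified"
    type_, sep, value = rest.partition(":")
    assert sep, "parameter type not specified"
    converters = {"int": int, "float": float, "str": str}
    return key, converters[type_](value)

print(parse_param("threshold=float:0.5"))  # ('threshold', 0.5)
print(parse_param("model=str:yolov8"))     # ('model', 'yolov8')
```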
- """ - - task_params = {} - data_params = {} - - for k, v in kwargs.items(): - if k in models.DataRequest.attribute_map or k == "frame_step": - data_params[k] = v - else: - task_params[k] = v - - task = self.client.tasks.create_from_data( - spec=models.TaskWriteRequest(name=name, labels=labels, **task_params), - resource_type=resource_type, - resources=resources, - data_params=data_params, - annotation_path=annotation_path, - annotation_format=annotation_format, - status_check_period=status_check_period, - pbar=DeferredTqdmProgressReporter(), - ) - print("Created task id", task.id) - - def tasks_delete(self, task_ids: Sequence[int]) -> None: - """Delete a list of tasks, ignoring those which don't exist.""" - self.client.tasks.remove_by_ids(task_ids=task_ids) - - def tasks_frames( - self, - task_id: int, - frame_ids: Sequence[int], - *, - outdir: str = "", - quality: str = "original", - ) -> None: - """ - Download the requested frame numbers for a task and save images as - task_<task_id>_frame_<frame_id>.jpg. - """ - self.client.tasks.retrieve(obj_id=task_id).download_frames( - frame_ids=frame_ids, - outdir=outdir, - quality=quality, - filename_pattern=f"task_{task_id}" + "_frame_{frame_id:06d}{frame_ext}", - ) - - def tasks_dump( - self, - task_id: int, - fileformat: str, - filename: str, - *, - status_check_period: int = 2, - include_images: bool = False, - ) -> None: - """ - Download annotations for a task in the specified format (e.g. 'YOLO ZIP 1.0'). - """ - self.client.tasks.retrieve(obj_id=task_id).export_dataset( - format_name=fileformat, - filename=filename, - pbar=DeferredTqdmProgressReporter(), - status_check_period=status_check_period, - include_images=include_images, - ) - - def tasks_upload( - self, task_id: str, fileformat: str, filename: str, *, status_check_period: int = 2 - ) -> None: - """Upload annotations for a task in the specified format - (e.g.
'YOLO ZIP 1.0').""" - self.client.tasks.retrieve(obj_id=task_id).import_annotations( - format_name=fileformat, - filename=filename, - status_check_period=status_check_period, - pbar=DeferredTqdmProgressReporter(), - ) - - def tasks_export(self, task_id: str, filename: str, *, status_check_period: int = 2) -> None: - """Download a task backup""" - self.client.tasks.retrieve(obj_id=task_id).download_backup( - filename=filename, - status_check_period=status_check_period, - pbar=DeferredTqdmProgressReporter(), - ) - - def tasks_import(self, filename: str, *, status_check_period: int = 2) -> None: - """Import a task from a backup file""" - self.client.tasks.create_from_backup( - filename=filename, - status_check_period=status_check_period, - pbar=DeferredTqdmProgressReporter(), - ) - - def tasks_auto_annotate( - self, - task_id: int, - *, - function_module: Optional[str] = None, - function_file: Optional[Path] = None, - function_parameters: Dict[str, Any], - clear_existing: bool = False, - allow_unmatched_labels: bool = False, - ) -> None: - if function_module is not None: - function = importlib.import_module(function_module) - elif function_file is not None: - module_spec = importlib.util.spec_from_file_location("__cvat_function__", function_file) - function = importlib.util.module_from_spec(module_spec) - module_spec.loader.exec_module(function) - else: - assert False, "function identification arguments missing" - - if hasattr(function, "create"): - # this is actually a function factory - function = function.create(**function_parameters) - else: - if function_parameters: - raise TypeError("function takes no parameters") - - cvataa.annotate_task( - self.client, - task_id, - function, - pbar=DeferredTqdmProgressReporter(), - clear_existing=clear_existing, - allow_unmatched_labels=allow_unmatched_labels, - ) diff --git a/cvat-cli/src/cvat_cli/parser.py b/cvat-cli/src/cvat_cli/parser.py deleted file mode 100644 index d456b087cd65..000000000000 --- a/cvat-cli/src/cvat_cli/parser.py +++ /dev/null @@ -1,452 +0,0 @@ -# Copyright (C) 2021-2022 Intel Corporation -# Copyright (C) 2022 CVAT.ai Corporation -# -# SPDX-License-Identifier: MIT - -import argparse -import getpass -import json -import logging -import os -import textwrap -from pathlib import Path -from typing import Any, Tuple - -from attr.converters import to_bool -from cvat_sdk.core.proxies.tasks import ResourceType - -from .version import VERSION - - -def get_auth(s): - """Parse USER[:PASS] strings and prompt for password if none was - supplied.""" - user, _, password = s.partition(":") - password = password or os.environ.get("PASS") or getpass.getpass() - return user, password - - -def parse_label_arg(s): - """If s is a file load it as JSON, otherwise parse s as JSON.""" - if os.path.exists(s): - with open(s, "r") as fp: - return json.load(fp) - else: - return json.loads(s) - - -def parse_resource_type(s: str) -> ResourceType: - try: - return ResourceType[s.upper()] - except KeyError: - return s - - -def parse_function_parameter(s: str) -> Tuple[str, Any]: - key, sep, type_and_value = s.partition("=") - - if not sep: - raise argparse.ArgumentTypeError("parameter value not specified") - - type_, sep, value = type_and_value.partition(":") - - if not sep: - raise argparse.ArgumentTypeError("parameter type not specified") - - if type_ == "int": - value = int(value) - elif type_ == "float": - value = float(value) - elif type_ == "str": - pass - elif type_ == "bool": - value = to_bool(value) - else: - raise argparse.ArgumentTypeError(f"unsupported 
parameter type {type_!r}") - - return (key, value) - - -class BuildDictAction(argparse.Action): - def __init__(self, option_strings, dest, default=None, **kwargs): - super().__init__(option_strings, dest, default=default or {}, **kwargs) - - def __call__(self, parser, namespace, values, option_string=None): - key, value = values - getattr(namespace, self.dest)[key] = value - - -def make_cmdline_parser() -> argparse.ArgumentParser: - ####################################################################### - # Command line interface definition - ####################################################################### - parser = argparse.ArgumentParser( - description="Perform common operations related to CVAT tasks.\n\n" - ) - parser.add_argument("--version", action="version", version=VERSION) - parser.add_argument( - "--insecure", - action="store_true", - help="Allows to disable SSL certificate check", - ) - - task_subparser = parser.add_subparsers(dest="action") - - ####################################################################### - # Positional arguments - ####################################################################### - parser.add_argument( - "--auth", - type=get_auth, - metavar="USER:[PASS]", - default=getpass.getuser(), - help="""defaults to the current user and supports the PASS - environment variable or password prompt - (default user: %(default)s).""", - ) - parser.add_argument( - "--server-host", type=str, default="localhost", help="host (default: %(default)s)" - ) - parser.add_argument( - "--server-port", - type=int, - default=None, - help="port (default: 80 for http and 443 for https connections)", - ) - parser.add_argument( - "--organization", - "--org", - metavar="SLUG", - help="""short name (slug) of the organization - to use when listing or creating resources; - set to blank string to use the personal workspace - (default: list all accessible objects, create in personal workspace)""", - ) - parser.add_argument( - "--debug", - action="store_const", - dest="loglevel", - const=logging.DEBUG, - default=logging.INFO, - help="show debug output", - ) - - ####################################################################### - # Create - ####################################################################### - task_create_parser = task_subparser.add_parser( - "create", - description=textwrap.dedent( - """\ - Create a new CVAT task. To create a task, you need - to specify labels using the --labels argument or - attach the task to an existing project using the - --project_id argument. - """ - ), - formatter_class=argparse.RawTextHelpFormatter, - ) - task_create_parser.add_argument("name", type=str, help="name of the task") - task_create_parser.add_argument( - "resource_type", - default="local", - choices=list(ResourceType), - type=parse_resource_type, - help="type of files specified", - ) - task_create_parser.add_argument("resources", type=str, help="list of paths or URLs", nargs="+") - task_create_parser.add_argument( - "--annotation_path", default="", type=str, help="path to annotation file" - ) - task_create_parser.add_argument( - "--annotation_format", - default="CVAT 1.1", - type=str, - help="format of the annotation file being uploaded, e.g. 
CVAT 1.1", - ) - task_create_parser.add_argument( - "--bug_tracker", "--bug", default=None, type=str, help="bug tracker URL" - ) - task_create_parser.add_argument( - "--chunk_size", default=None, type=int, help="the number of frames per chunk" - ) - task_create_parser.add_argument( - "--completion_verification_period", - dest="status_check_period", - default=2, - type=float, - help=textwrap.dedent( - """\ - number of seconds to wait until checking - if data compression finished (necessary before uploading annotations) - """ - ), - ) - task_create_parser.add_argument( - "--copy_data", - default=False, - action="store_true", - help=textwrap.dedent( - """\ - set the option to copy the data, only used when resource type is - share (default: %(default)s) - """ - ), - ) - task_create_parser.add_argument( - "--frame_step", - default=None, - type=int, - help=textwrap.dedent( - """\ - set the frame step option in the advanced configuration - when uploading image series or videos (default: %(default)s) - """ - ), - ) - task_create_parser.add_argument( - "--image_quality", - default=70, - type=int, - help=textwrap.dedent( - """\ - set the image quality option in the advanced configuration - when creating tasks.(default: %(default)s) - """ - ), - ) - task_create_parser.add_argument( - "--labels", - default="[]", - type=parse_label_arg, - help="string or file containing JSON labels specification", - ) - task_create_parser.add_argument( - "--project_id", default=None, type=int, help="project ID if project exists" - ) - task_create_parser.add_argument( - "--overlap", - default=None, - type=int, - help="the number of intersected frames between different segments", - ) - task_create_parser.add_argument( - "--segment_size", default=None, type=int, help="the number of frames in a segment" - ) - task_create_parser.add_argument( - "--sorting-method", - default="lexicographical", - choices=["lexicographical", "natural", "predefined", "random"], - help="""data soring method (default: %(default)s)""", - ) - task_create_parser.add_argument( - "--start_frame", default=None, type=int, help="the start frame of the video" - ) - task_create_parser.add_argument( - "--stop_frame", default=None, type=int, help="the stop frame of the video" - ) - task_create_parser.add_argument( - "--use_cache", action="store_true", help="""use cache""" # automatically sets default=False - ) - task_create_parser.add_argument( - "--use_zip_chunks", - action="store_true", # automatically sets default=False - help="""zip chunks before sending them to the server""", - ) - task_create_parser.add_argument( - "--cloud_storage_id", - default=None, - type=int, - help="cloud storage ID if you would like to use data from cloud storage", - ) - task_create_parser.add_argument( - "--filename_pattern", - type=str, - help=textwrap.dedent( - """\ - pattern for filtering data from the manifest file for the upload. - Only shell-style wildcards are supported: - * - matches everything - ? 
- matches any single character - [seq] - matches any character in 'seq' - [!seq] - matches any character not in seq - """ - ), - ) - - ####################################################################### - # Delete - ####################################################################### - delete_parser = task_subparser.add_parser("delete", description="Delete a CVAT task.") - delete_parser.add_argument("task_ids", type=int, help="list of task IDs", nargs="+") - - ####################################################################### - # List - ####################################################################### - ls_parser = task_subparser.add_parser( - "ls", description="List all CVAT tasks in simple or JSON format." - ) - ls_parser.add_argument( - "--json", - dest="use_json_output", - default=False, - action="store_true", - help="output JSON data", - ) - - ####################################################################### - # Frames - ####################################################################### - frames_parser = task_subparser.add_parser( - "frames", description="Download all frame images for a CVAT task." - ) - frames_parser.add_argument("task_id", type=int, help="task ID") - frames_parser.add_argument( - "frame_ids", type=int, help="list of frame IDs to download", nargs="+" - ) - frames_parser.add_argument( - "--outdir", type=str, default="", help="directory to save images (default: CWD)" - ) - frames_parser.add_argument( - "--quality", - type=str, - choices=("original", "compressed"), - default="original", - help="choose quality of images (default: %(default)s)", - ) - - ####################################################################### - # Dump - ####################################################################### - dump_parser = task_subparser.add_parser( - "dump", description="Download annotations for a CVAT task." - ) - dump_parser.add_argument("task_id", type=int, help="task ID") - dump_parser.add_argument("filename", type=str, help="output file") - dump_parser.add_argument( - "--format", - dest="fileformat", - type=str, - default="CVAT for images 1.1", - help="annotation format (default: %(default)s)", - ) - dump_parser.add_argument( - "--completion_verification_period", - dest="status_check_period", - default=2, - type=float, - help="number of seconds to wait until checking if dataset building finished", - ) - dump_parser.add_argument( - "--with-images", - type=to_bool, - default=False, - dest="include_images", - help="Whether to include images or not (default: %(default)s)", - ) - - ####################################################################### - # Upload Annotations - ####################################################################### - upload_parser = task_subparser.add_parser( - "upload", description="Upload annotations for a CVAT task." 
- ) - upload_parser.add_argument("task_id", type=int, help="task ID") - upload_parser.add_argument("filename", type=str, help="upload file") - upload_parser.add_argument( - "--format", - dest="fileformat", - type=str, - default="CVAT 1.1", - help="annotation format (default: %(default)s)", - ) - - ####################################################################### - # Export task - ####################################################################### - export_task_parser = task_subparser.add_parser("export", description="Export a CVAT task.") - export_task_parser.add_argument("task_id", type=int, help="task ID") - export_task_parser.add_argument("filename", type=str, help="output file") - export_task_parser.add_argument( - "--completion_verification_period", - dest="status_check_period", - default=2, - type=float, - help="time interval between checks if archive building has been finished, in seconds", - ) - - ####################################################################### - # Import task - ####################################################################### - import_task_parser = task_subparser.add_parser("import", description="Import a CVAT task.") - import_task_parser.add_argument("filename", type=str, help="upload file") - import_task_parser.add_argument( - "--completion_verification_period", - dest="status_check_period", - default=2, - type=float, - help="time interval between checks if archive processing was finished, in seconds", - ) - - ####################################################################### - # Auto-annotate - ####################################################################### - auto_annotate_task_parser = task_subparser.add_parser( - "auto-annotate", - description="Automatically annotate a CVAT task by running a function on the local machine.", - ) - auto_annotate_task_parser.add_argument("task_id", type=int, help="task ID") - - function_group = auto_annotate_task_parser.add_mutually_exclusive_group(required=True) - - function_group.add_argument( - "--function-module", - metavar="MODULE", - help="qualified name of a module to use as the function", - ) - - function_group.add_argument( - "--function-file", - metavar="PATH", - type=Path, - help="path to a Python source file to use as the function", - ) - - auto_annotate_task_parser.add_argument( - "--function-parameter", - "-p", - metavar="NAME=TYPE:VALUE", - type=parse_function_parameter, - action=BuildDictAction, - dest="function_parameters", - help="parameter for the function", - ) - - auto_annotate_task_parser.add_argument( - "--clear-existing", action="store_true", help="Remove existing annotations from the task" - ) - - auto_annotate_task_parser.add_argument( - "--allow-unmatched-labels", - action="store_true", - help="Allow the function to declare labels not configured in the task", - ) - - return parser - - -def get_action_args( - parser: argparse.ArgumentParser, parsed_args: argparse.Namespace -) -> argparse.Namespace: - # FIXME: a hacky way to remove unnecessary args - action_args = dict(vars(parsed_args)) - - for action in parser._actions: - action_args.pop(action.dest, None) - - # remove default args - for k, v in dict(action_args).items(): - if v is None: - action_args.pop(k, None) - - return argparse.Namespace(**action_args) diff --git a/cvat-cli/src/cvat_cli/version.py b/cvat-cli/src/cvat_cli/version.py index b2829a54b105..c176a6b233ec 100644 --- a/cvat-cli/src/cvat_cli/version.py +++ b/cvat-cli/src/cvat_cli/version.py @@ -1 +1 @@ -VERSION = "2.22.0" +VERSION = "2.24.1" diff --git 
a/cvat-core/package.json b/cvat-core/package.json index 782d74c15b65..8e27f80f0b98 100644 --- a/cvat-core/package.json +++ b/cvat-core/package.json @@ -1,6 +1,6 @@ { "name": "cvat-core", - "version": "15.2.0", + "version": "15.3.1", "type": "module", "description": "Part of Computer Vision Tool which presents an interface for client-side integration", "main": "src/api.ts", diff --git a/cvat-core/src/annotations-actions.ts b/cvat-core/src/annotations-actions.ts deleted file mode 100644 index 9e956421ae08..000000000000 --- a/cvat-core/src/annotations-actions.ts +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright (C) 2023-2024 CVAT.ai Corporation -// -// SPDX-License-Identifier: MIT - -import { omit, throttle } from 'lodash'; -import { ArgumentError } from './exceptions'; -import { SerializedCollection, SerializedShape } from './server-response-types'; -import { Job, Task } from './session'; -import { EventScope, ObjectType } from './enums'; -import ObjectState from './object-state'; -import { getAnnotations, getCollection } from './annotations'; -import { propagateShapes } from './object-utils'; - -export interface SingleFrameActionInput { - collection: Omit<SerializedCollection, 'version' | 'tags' | 'tracks'>; - frameData: { - width: number; - height: number; - number: number; - }; -} - -export interface SingleFrameActionOutput { - collection: Omit<SerializedCollection, 'version' | 'tags' | 'tracks'>; -} - -export enum ActionParameterType { - SELECT = 'select', - NUMBER = 'number', -} - -// For SELECT values should be a list of possible options -// For NUMBER values should be a list with [min, max, step], -// or a callback ({ instance }: { instance: Job | Task }) => [min, max, step] -type ActionParameters = Record<string, { - type: ActionParameterType; - values: string[] | (({ instance }: { instance: Job | Task }) => string[]); - defaultValue: string | (({ instance }: { instance: Job | Task }) => string); -}>; - -export enum FrameSelectionType { - SEGMENT = 'segment', - CURRENT_FRAME = 'current_frame', -} - -export default class BaseSingleFrameAction { - /* eslint-disable @typescript-eslint/no-unused-vars */ - public async init( - sessionInstance: Job | Task, - parameters: Record<string, string>, - ): Promise<void> { - throw new Error('Method not implemented'); - } - - public async destroy(): Promise<void> { - throw new Error('Method not implemented'); - } - - public async run(sessionInstance: Job | Task, input: SingleFrameActionInput): Promise<SingleFrameActionOutput> { - throw new Error('Method not implemented'); - } - - public get name(): string { - throw new Error('Method not implemented'); - } - - public get parameters(): ActionParameters | null { - throw new Error('Method not implemented'); - } - - public get frameSelection(): FrameSelectionType { - return FrameSelectionType.SEGMENT; - } -} - -class RemoveFilteredShapes extends BaseSingleFrameAction { - public async init(): Promise<void> { - // nothing to init - } - - public async destroy(): Promise<void> { - // nothing to destroy - } - - public async run(): Promise<SingleFrameActionOutput> { - return { collection: { shapes: [] } }; - } - - public get name(): string { - return 'Remove filtered shapes'; - } - - public get parameters(): ActionParameters | null { - return null; - } -} - -class PropagateShapes extends BaseSingleFrameAction { - #targetFrame: number; - - public async init(instance, parameters): Promise<void> { - this.#targetFrame = parameters['Target frame']; - } - - public async destroy(): Promise<void> { - // nothing to destroy - } - - public async run( - instance, - { collection: { shapes }, frameData: { number } }, - ): Promise<SingleFrameActionOutput> { - if (number === this.#targetFrame) { - return { collection: { shapes } }; - } - const propagatedShapes = propagateShapes<SerializedShape>(shapes, number, this.#targetFrame); - return { collection: { shapes: [...shapes, ...propagatedShapes] } }; - } - - public get name(): string { - return 'Propagate shapes'; - } - - public get parameters(): ActionParameters | null { - return { - 'Target frame': { - type: ActionParameterType.NUMBER, - values: ({ instance }) => { - if (instance instanceof Job) { - return [instance.startFrame, instance.stopFrame, 1].map((val) => val.toString()); - } - return [0, instance.size - 1, 1].map((val) => val.toString()); - }, - defaultValue: ({ instance }) => { - if (instance instanceof Job) { - return instance.stopFrame.toString(); - } - return (instance.size - 1).toString(); - }, - }, - }; - } - - public get frameSelection(): FrameSelectionType { - return FrameSelectionType.CURRENT_FRAME; - } -} - -const registeredActions: BaseSingleFrameAction[] = []; - -export async function listActions(): Promise<BaseSingleFrameAction[]> { - return [...registeredActions]; -} - -export async function registerAction(action: BaseSingleFrameAction): Promise<void> { - if (!(action instanceof BaseSingleFrameAction)) { - throw new ArgumentError('Provided action is not instance of BaseSingleFrameAction'); - } - - const { name } = action; - if (registeredActions.map((_action) => _action.name).includes(name)) { - throw new ArgumentError(`Action name must be unique. Name "${name}" is already exists`); - } - - registeredActions.push(action); -} - -registerAction(new RemoveFilteredShapes()); -registerAction(new PropagateShapes()); - -async function runSingleFrameChain( - instance: Job | Task, - actionsChain: BaseSingleFrameAction[], - actionParameters: Record<string, string>[], - frameFrom: number, - frameTo: number, - filters: string[], - onProgress: (message: string, progress: number) => void, - cancelled: () => boolean, -): Promise<void> { - type IDsToHandle = { shapes: number[] }; - const event = await instance.logger.log(EventScope.annotationsAction, { - from: frameFrom, - to: frameTo, - chain: actionsChain.map((action) => action.name).join(' => '), - }, true); - - // if called too fast, it will freeze UI, so, add throttling here - const wrappedOnProgress = throttle(onProgress, 100, { leading: true, trailing: true }); - const showMessageWithPause = async (message: string, progress: number, duration: number): Promise<void> => { - // wrapper that gives a chance to abort action - wrappedOnProgress(message, progress); - await new Promise((resolve) => setTimeout(resolve, duration)); - }; - - try { - await showMessageWithPause('Actions initialization', 0, 500); - if (cancelled()) { - return; - } - - await Promise.all(actionsChain.map((action, idx) => { - const declaredParameters = action.parameters; - if (!declaredParameters) { - return action.init(instance, {}); - } - - const setupValues = actionParameters[idx]; - const parameters = Object.entries(declaredParameters).reduce((acc, [name, { type, defaultValue }]) => { - if (type === ActionParameterType.NUMBER) { - acc[name] = +(Object.hasOwn(setupValues, name) ? setupValues[name] : defaultValue); - } else { - acc[name] = (Object.hasOwn(setupValues, name) ? setupValues[name] : defaultValue); - } - return acc; - }, {} as Record<string, string>); - - return action.init(instance, parameters); - })); - - const exportedCollection = getCollection(instance).export(); - const handledCollection: SingleFrameActionInput['collection'] = { shapes: [] }; - const modifiedCollectionIDs: IDsToHandle = { shapes: [] }; - - // Iterate over frames - const totalFrames = frameTo - frameFrom + 1; - for (let frame = frameFrom; frame <= frameTo; frame++) { - const frameData = await Object.getPrototypeOf(instance).frames - .get.implementation.call(instance, frame); - - // Ignore deleted frames - if (!frameData.deleted) { - // Get annotations according to filter - const states: ObjectState[] = await getAnnotations(instance, frame, false, filters); - const frameCollectionIDs = states.reduce((acc, val) => { - if (val.objectType === ObjectType.SHAPE) { - acc.shapes.push(val.clientID as number); - } - return acc; - }, { shapes: [] }); - - // Pick frame collection according to filtered IDs - let frameCollection = { - shapes: exportedCollection.shapes.filter((shape) => frameCollectionIDs - .shapes.includes(shape.clientID as number)), - }; - - // Iterate over actions on each not deleted frame - for await (const action of actionsChain) { - ({ collection: frameCollection } = await action.run(instance, { - collection: frameCollection, - frameData: { - width: frameData.width, - height: frameData.height, - number: frameData.number, - }, - })); - } - - const progress = Math.ceil(+(((frame - frameFrom) / totalFrames) * 100)); - wrappedOnProgress('Actions are running', progress); - if (cancelled()) { - return; - } - - handledCollection.shapes.push(...frameCollection.shapes.map((shape) => omit(shape, 'id'))); - modifiedCollectionIDs.shapes.push(...frameCollectionIDs.shapes); - } - } - - await showMessageWithPause('Commiting handled objects', 100, 1500); - if (cancelled()) { - return; - } - - exportedCollection.shapes.forEach((shape) => { - if (Number.isInteger(shape.clientID) && !modifiedCollectionIDs.shapes.includes(shape.clientID as number)) { - handledCollection.shapes.push(shape); - } - }); - - await instance.annotations.clear(); - await instance.actions.clear(); - await instance.annotations.import({ - ...handledCollection, - tracks: exportedCollection.tracks, - tags: exportedCollection.tags, - }); - - event.close(); - } finally { - wrappedOnProgress('Finalizing', 100); - await Promise.all(actionsChain.map((action) => action.destroy())); - } -} - -export async function runActions( - instance: Job | Task, - actionsChain: BaseSingleFrameAction[], - actionParameters: Record<string, string>[], - frameFrom: number, - frameTo: number, - filters: string[], - onProgress: (message: string, progress: number) => void, - cancelled: () => boolean, -): Promise<void> { - // there will be another function for MultiFrameChains (actions handling tracks) - return runSingleFrameChain( - instance, - actionsChain, - actionParameters, - frameFrom, - frameTo, - filters, - onProgress, - cancelled, - ); -} diff --git a/cvat-core/src/annotations-actions/annotations-actions.ts b/cvat-core/src/annotations-actions/annotations-actions.ts new file mode 100644 index 000000000000..172b8cd88e3d --- /dev/null +++ b/cvat-core/src/annotations-actions/annotations-actions.ts @@ -0,0 +1,113 @@ +// Copyright (C) 2024 CVAT.ai Corporation +// +// SPDX-License-Identifier: MIT + +import ObjectState from '../object-state'; +import { ArgumentError } from '../exceptions'; +import { Job, Task } from '../session'; +import { BaseAction } from './base-action'; +import {
+ BaseShapesAction, run as runShapesAction, call as callShapesAction, +} from './base-shapes-action'; +import { + BaseCollectionAction, run as runCollectionAction, call as callCollectionAction, +} from './base-collection-action'; + +import { RemoveFilteredShapes } from './remove-filtered-shapes'; +import { PropagateShapes } from './propagate-shapes'; + +const registeredActions: BaseAction[] = []; + +export async function listActions(): Promise<BaseAction[]> { + return [...registeredActions]; +} + +export async function registerAction(action: BaseAction): Promise<void> { + if (!(action instanceof BaseAction)) { + throw new ArgumentError('Provided action must inherit one of the base classes'); + } + + const { name } = action; + if (registeredActions.map((_action) => _action.name).includes(name)) { + throw new ArgumentError(`Action name must be unique. Name "${name}" already exists`); + } + + registeredActions.push(action); +} + +registerAction(new RemoveFilteredShapes()); +registerAction(new PropagateShapes()); + +export async function runAction( + instance: Job | Task, + action: BaseAction, + actionParameters: Record<string, string>, + frameFrom: number, + frameTo: number, + filters: object[], + onProgress: (message: string, progress: number) => void, + cancelled: () => boolean, +): Promise<void> { + if (action instanceof BaseShapesAction) { + return runShapesAction( + instance, + action, + actionParameters, + frameFrom, + frameTo, + filters, + onProgress, + cancelled, + ); + } + + if (action instanceof BaseCollectionAction) { + return runCollectionAction( + instance, + action, + actionParameters, + frameFrom, + filters, + onProgress, + cancelled, + ); + } + + return Promise.resolve(); +} + +export async function callAction( + instance: Job | Task, + action: BaseAction, + actionParameters: Record<string, string>, + frame: number, + states: ObjectState[], + onProgress: (message: string, progress: number) => void, + cancelled: () => boolean, +): Promise<void> { + if (action instanceof BaseShapesAction) { + return callShapesAction( + instance, + action, + actionParameters, + frame, + states, + onProgress, + cancelled, + ); + } + + if (action instanceof BaseCollectionAction) { + return callCollectionAction( + instance, + action, + actionParameters, + frame, + states, + onProgress, + cancelled, + ); + } + + return Promise.resolve(); +} diff --git a/cvat-core/src/annotations-actions/base-action.ts new file mode 100644 index 000000000000..8a0abba4b32d --- /dev/null +++ b/cvat-core/src/annotations-actions/base-action.ts @@ -0,0 +1,61 @@ +// Copyright (C) 2024 CVAT.ai Corporation +// +// SPDX-License-Identifier: MIT + +import { SerializedCollection } from '../server-response-types'; +import ObjectState from '../object-state'; +import { Job, Task } from '../session'; + +export enum ActionParameterType { + SELECT = 'select', + NUMBER = 'number', + CHECKBOX = 'checkbox', +} + +// For SELECT, values should be a list of possible options +// For NUMBER, values should be a list of [min, max, step], +// or a callback ({ instance }: { instance: Job | Task }) => [min, max, step] +export type ActionParameters = Record<string, { + type: ActionParameterType; + values: string[] | (({ instance }: { instance: Job | Task }) => string[]); + defaultValue: string | (({ instance }: { instance: Job | Task }) => string); +}>; + +export abstract class BaseAction { + public abstract init(sessionInstance: Job | Task, parameters: Record<string, string | number>): Promise<void>; + public abstract destroy(): Promise<void>; + public abstract run(input: unknown): Promise<unknown>; + public abstract applyFilter(input: unknown): unknown; + public abstract isApplicableForObject(objectState: 
ObjectState): boolean; + + public abstract get name(): string; + public abstract get parameters(): ActionParameters | null; +} + +export function prepareActionParameters(declared: ActionParameters, defined: object): Record<string, string | number> { + if (!declared) { + return {}; + } + + return Object.entries(declared).reduce((acc, [name, { type, defaultValue }]) => { + if (type === ActionParameterType.NUMBER) { + acc[name] = +(Object.hasOwn(defined, name) ? defined[name] : defaultValue); + } else { + acc[name] = (Object.hasOwn(defined, name) ? defined[name] : defaultValue); + } + return acc; + }, {} as Record<string, string | number>); +} + +export function validateClientIDs(collection: Partial<SerializedCollection>) { + [].concat( + collection.shapes ?? [], + collection.tracks ?? [], + collection.tags ?? [], + ).forEach((object) => { + // clientID is required for correct collection filtering and committing in the annotations actions logic + if (typeof object.clientID !== 'number') { + throw new Error('ClientID is undefined when running annotations action, but required'); + } + }); +} diff --git a/cvat-core/src/annotations-actions/base-collection-action.ts new file mode 100644 index 000000000000..f2676bbdc375 --- /dev/null +++ b/cvat-core/src/annotations-actions/base-collection-action.ts @@ -0,0 +1,177 @@ +// Copyright (C) 2024 CVAT.ai Corporation +// +// SPDX-License-Identifier: MIT + +import { throttle } from 'lodash'; + +import ObjectState from '../object-state'; +import AnnotationsFilter from '../annotations-filter'; +import { Job, Task } from '../session'; +import { + SerializedCollection, SerializedShape, + SerializedTag, SerializedTrack, +} from '../server-response-types'; +import { EventScope, ObjectType } from '../enums'; +import { getCollection } from '../annotations'; +import { BaseAction, prepareActionParameters, validateClientIDs } from './base-action'; + +export interface CollectionActionInput { + onProgress(message: string, percent: number): void; + cancelled(): boolean; + collection: Pick<SerializedCollection, 'shapes' | 'tags' | 'tracks'>; + frameData: { + width: number; + height: number; + number: number; + }; +} + +export interface CollectionActionOutput { + created: CollectionActionInput['collection']; + deleted: CollectionActionInput['collection']; +} + +export abstract class BaseCollectionAction extends BaseAction { + public abstract run(input: CollectionActionInput): Promise<CollectionActionOutput>; + public abstract applyFilter( + input: Pick<CollectionActionInput, 'collection' | 'frameData'>, + ): CollectionActionInput['collection']; +} + +export async function run( + instance: Job | Task, + action: BaseCollectionAction, + actionParameters: Record<string, string>, + frame: number, + filters: object[], + onProgress: (message: string, progress: number) => void, + cancelled: () => boolean, +): Promise<void> { + const event = await instance.logger.log(EventScope.annotationsAction, { + from: frame, + to: frame, + name: action.name, + }, true); + + const wrappedOnProgress = throttle(onProgress, 100, { leading: true, trailing: true }); + const showMessageWithPause = async (message: string, progress: number, duration: number): Promise<void> => { + // wrapper that gives a chance to abort the action + wrappedOnProgress(message, progress); + await new Promise((resolve) => setTimeout(resolve, duration)); + }; + + try { + await showMessageWithPause('Action initialization', 0, 500); + if (cancelled()) { + return; + } + + await action.init(instance, prepareActionParameters(action.parameters, actionParameters)); + + const frameData = await Object.getPrototypeOf(instance).frames + .get.implementation.call(instance, frame); + const exportedCollection 
= getCollection(instance).export(); + + // Apply action filter first + const filteredByAction = action.applyFilter({ collection: exportedCollection, frameData }); + validateClientIDs(filteredByAction); + + let mapID2Obj = [].concat(filteredByAction.shapes, filteredByAction.tags, filteredByAction.tracks) + .reduce((acc, object) => { + acc[object.clientID as number] = object; + return acc; + }, {}); + + // Then apply user filter + const annotationsFilter = new AnnotationsFilter(); + const filteredCollectionIDs = annotationsFilter + .filterSerializedCollection(filteredByAction, instance.labels, filters); + const filteredByUser = { + shapes: filteredCollectionIDs.shapes.map((clientID) => mapID2Obj[clientID]), + tags: filteredCollectionIDs.tags.map((clientID) => mapID2Obj[clientID]), + tracks: filteredCollectionIDs.tracks.map((clientID) => mapID2Obj[clientID]), + }; + mapID2Obj = [].concat(filteredByUser.shapes, filteredByUser.tags, filteredByUser.tracks) + .reduce((acc, object) => { + acc[object.clientID as number] = object; + return acc; + }, {}); + + const { created, deleted } = await action.run({ + collection: filteredByUser, + frameData: { + width: frameData.width, + height: frameData.height, + number: frameData.number, + }, + onProgress: wrappedOnProgress, + cancelled, + }); + + await instance.annotations.commit(created, deleted, frame); + event.close(); + } finally { + await action.destroy(); + } +} + +export async function call( + instance: Job | Task, + action: BaseCollectionAction, + actionParameters: Record<string, string>, + frame: number, + states: ObjectState[], + onProgress: (message: string, progress: number) => void, + cancelled: () => boolean, +): Promise<void> { + const event = await instance.logger.log(EventScope.annotationsAction, { + from: frame, + to: frame, + name: action.name, + }, true); + + const throttledOnProgress = throttle(onProgress, 100, { leading: true, trailing: true }); + try { + await action.init(instance, prepareActionParameters(action.parameters, actionParameters)); + const exportedStates = await Promise.all(states.map((state) => state.export())); + const exportedCollection = exportedStates.reduce((acc, value, idx) => { + if (states[idx].objectType === ObjectType.SHAPE) { + acc.shapes.push(value as SerializedShape); + } + + if (states[idx].objectType === ObjectType.TAG) { + acc.tags.push(value as SerializedTag); + } + + if (states[idx].objectType === ObjectType.TRACK) { + acc.tracks.push(value as SerializedTrack); + } + + return acc; + }, { shapes: [], tags: [], tracks: [] }); + + const frameData = await Object.getPrototypeOf(instance).frames.get.implementation.call(instance, frame); + const filteredByAction = action.applyFilter({ collection: exportedCollection, frameData }); + validateClientIDs(filteredByAction); + + const processedCollection = await action.run({ + onProgress: throttledOnProgress, + cancelled, + collection: filteredByAction, + frameData: { + width: frameData.width, + height: frameData.height, + number: frameData.number, + }, + }); + + await instance.annotations.commit( + processedCollection.created, + processedCollection.deleted, + frame, + ); + event.close(); + } finally { + await action.destroy(); + } +} diff --git a/cvat-core/src/annotations-actions/base-shapes-action.ts new file mode 100644 index 000000000000..e5223f085d2d --- /dev/null +++ b/cvat-core/src/annotations-actions/base-shapes-action.ts @@ -0,0 +1,195 @@ +// Copyright (C) 2024 CVAT.ai Corporation +// +// SPDX-License-Identifier: MIT + 
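[Editorial aside, not part of the patch] The shapes-action contract defined in this new file can be implemented outside cvat-core and plugged in through registerAction()/cvat.actions.register shown above. A minimal sketch under stated assumptions: the action name, the 'Minimum area' parameter, and the rectangle-only criterion are all illustrative, not something this PR ships.

import ObjectState from '../object-state';
import { ObjectType, ShapeType } from '../enums';
import { ActionParameterType, ActionParameters } from './base-action';
import { BaseShapesAction, ShapesActionInput, ShapesActionOutput } from './base-shapes-action';

class RemoveSmallRectangles extends BaseShapesAction {
    #minArea = 0;

    public async init(_instance, parameters): Promise<void> {
        // parameters arrive already converted by prepareActionParameters()
        this.#minArea = +parameters['Minimum area'];
    }

    public async destroy(): Promise<void> {
        // nothing to destroy
    }

    public async run(input: ShapesActionInput): Promise<ShapesActionOutput> {
        // delete every filtered rectangle whose area is below the threshold
        const deleted = input.collection.shapes.filter((shape) => {
            const [xtl, ytl, xbr, ybr] = shape.points;
            return Math.abs((xbr - xtl) * (ybr - ytl)) < this.#minArea;
        });
        return { created: { shapes: [] }, deleted: { shapes: deleted } };
    }

    public applyFilter(input: ShapesActionInput): ShapesActionInput['collection'] {
        // the action only looks at rectangles
        return { shapes: input.collection.shapes.filter((shape) => shape.type === ShapeType.RECTANGLE) };
    }

    public isApplicableForObject(objectState: ObjectState): boolean {
        return objectState.objectType === ObjectType.SHAPE && objectState.shapeType === ShapeType.RECTANGLE;
    }

    public get name(): string { return 'Remove small rectangles'; }

    public get parameters(): ActionParameters | null {
        return {
            'Minimum area': { type: ActionParameterType.NUMBER, values: ['0', '10000', '1'], defaultValue: '64' },
        };
    }
}

Once registered with cvat.actions.register(new RemoveSmallRectangles()), the subclass is picked up by both dispatchers above: run() for frame ranges and call() for explicitly selected states. [End of aside]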
+import { throttle } from 'lodash'; + +import ObjectState from '../object-state'; +import AnnotationsFilter from '../annotations-filter'; +import { Job, Task } from '../session'; +import { SerializedCollection, SerializedShape } from '../server-response-types'; +import { EventScope, ObjectType } from '../enums'; +import { getCollection } from '../annotations'; +import { BaseAction, prepareActionParameters, validateClientIDs } from './base-action'; + +export interface ShapesActionInput { + onProgress(message: string, percent: number): void; + cancelled(): boolean; + collection: Pick<SerializedCollection, 'shapes'>; + frameData: { + width: number; + height: number; + number: number; + }; +} + +export interface ShapesActionOutput { + created: ShapesActionInput['collection']; + deleted: ShapesActionInput['collection']; +} + +export abstract class BaseShapesAction extends BaseAction { + public abstract run(input: ShapesActionInput): Promise<ShapesActionOutput>; + public abstract applyFilter( + input: Pick<ShapesActionInput, 'collection' | 'frameData'> + ): ShapesActionInput['collection']; +} + +export async function run( + instance: Job | Task, + action: BaseShapesAction, + actionParameters: Record<string, string>, + frameFrom: number, + frameTo: number, + filters: object[], + onProgress: (message: string, progress: number) => void, + cancelled: () => boolean, +): Promise<void> { + const event = await instance.logger.log(EventScope.annotationsAction, { + from: frameFrom, + to: frameTo, + name: action.name, + }, true); + + const throttledOnProgress = throttle(onProgress, 100, { leading: true, trailing: true }); + const showMessageWithPause = async (message: string, progress: number, duration: number): Promise<void> => { + // wrapper that gives a chance to abort the action + throttledOnProgress(message, progress); + await new Promise((resolve) => setTimeout(resolve, duration)); + }; + + try { + await showMessageWithPause('Actions initialization', 0, 500); + if (cancelled()) { + return; + } + + await action.init(instance, prepareActionParameters(action.parameters, actionParameters)); + + const exportedCollection = getCollection(instance).export(); + validateClientIDs(exportedCollection); + + const annotationsFilter = new AnnotationsFilter(); + const filteredShapeIDs = annotationsFilter.filterSerializedCollection({ + shapes: exportedCollection.shapes, + tags: [], + tracks: [], + }, instance.labels, filters).shapes; + + const filteredShapesByFrame = exportedCollection.shapes.reduce((acc, shape) => { + if (shape.frame >= frameFrom && shape.frame <= frameTo && filteredShapeIDs.includes(shape.clientID)) { + acc[shape.frame] = acc[shape.frame] ?? []; + acc[shape.frame].push(shape); + } + return acc; + }, {} as Record<number, SerializedShape[]>); + + const totalUpdates = { created: { shapes: [] }, deleted: { shapes: [] } }; + // Iterate over frames + const totalFrames = frameTo - frameFrom + 1; + for (let frame = frameFrom; frame <= frameTo; frame++) { + const frameData = await Object.getPrototypeOf(instance).frames + .get.implementation.call(instance, frame); + + // Ignore deleted frames + if (!frameData.deleted) { + const frameShapes = filteredShapesByFrame[frame] ?? 
[]; + if (!frameShapes.length) { + continue; + } + + // finally, apply the action's own filter + const filteredByAction = action.applyFilter({ + collection: { + shapes: frameShapes, + }, + frameData, + }); + validateClientIDs(filteredByAction); + + const { created, deleted } = await action.run({ + onProgress: throttledOnProgress, + cancelled, + collection: { shapes: filteredByAction.shapes }, + frameData: { + width: frameData.width, + height: frameData.height, + number: frameData.number, + }, + }); + + Array.prototype.push.apply(totalUpdates.created.shapes, created.shapes); + Array.prototype.push.apply(totalUpdates.deleted.shapes, deleted.shapes); + + const progress = Math.ceil(+(((frame - frameFrom) / totalFrames) * 100)); + throttledOnProgress('Actions are running', progress); + if (cancelled()) { + return; + } + } + } + + await showMessageWithPause('Committing handled objects', 100, 1500); + if (cancelled()) { + return; + } + + await instance.annotations.commit( + { shapes: totalUpdates.created.shapes, tags: [], tracks: [] }, + { shapes: totalUpdates.deleted.shapes, tags: [], tracks: [] }, + frameFrom, + ); + + event.close(); + } finally { + await action.destroy(); + } +} + +export async function call( + instance: Job | Task, + action: BaseShapesAction, + actionParameters: Record<string, string>, + frame: number, + states: ObjectState[], + onProgress: (message: string, progress: number) => void, + cancelled: () => boolean, +): Promise<void> { + const event = await instance.logger.log(EventScope.annotationsAction, { + from: frame, + to: frame, + name: action.name, + }, true); + + const throttledOnProgress = throttle(onProgress, 100, { leading: true, trailing: true }); + try { + await action.init(instance, prepareActionParameters(action.parameters, actionParameters)); + + const exported = await Promise.all(states.filter((state) => state.objectType === ObjectType.SHAPE) + .map((state) => state.export())) as SerializedShape[]; + const frameData = await Object.getPrototypeOf(instance).frames.get.implementation.call(instance, frame); + const filteredByAction = action.applyFilter({ collection: { shapes: exported }, frameData }); + validateClientIDs(filteredByAction); + + const processedCollection = await action.run({ + onProgress: throttledOnProgress, + cancelled, + collection: { shapes: filteredByAction.shapes }, + frameData: { + width: frameData.width, + height: frameData.height, + number: frameData.number, + }, + }); + + await instance.annotations.commit( + { shapes: processedCollection.created.shapes, tags: [], tracks: [] }, + { shapes: processedCollection.deleted.shapes, tags: [], tracks: [] }, + frame, + ); + + event.close(); + } finally { + await action.destroy(); + } +} diff --git a/cvat-core/src/annotations-actions/propagate-shapes.ts new file mode 100644 index 000000000000..ee68b9600f4f --- /dev/null +++ b/cvat-core/src/annotations-actions/propagate-shapes.ts @@ -0,0 +1,85 @@ +// Copyright (C) 2024 CVAT.ai Corporation +// +// SPDX-License-Identifier: MIT + +import { range } from 'lodash'; + +import ObjectState from '../object-state'; +import { Job, Task } from '../session'; +import { SerializedShape } from '../server-response-types'; +import { propagateShapes } from '../object-utils'; +import { ObjectType } from '../enums'; + +import { ActionParameterType, ActionParameters } from './base-action'; +import { BaseCollectionAction, CollectionActionInput, CollectionActionOutput } from './base-collection-action'; + +export class PropagateShapes extends 
BaseCollectionAction { + #instance: Task | Job; + #targetFrame: number; + + public async init(instance: Job | Task, parameters): Promise<void> { + this.#instance = instance; + this.#targetFrame = parameters['Target frame']; + } + + public async destroy(): Promise<void> { + // nothing to destroy + } + + public async run(input: CollectionActionInput): Promise<CollectionActionOutput> { + const { collection, frameData: { number } } = input; + if (number === this.#targetFrame) { + return { + created: { shapes: [], tags: [], tracks: [] }, + deleted: { shapes: [], tags: [], tracks: [] }, + }; + } + + const frameNumbers = this.#instance instanceof Job ? + await this.#instance.frames.frameNumbers() : range(0, this.#instance.size); + const propagatedShapes = propagateShapes( + collection.shapes, number, this.#targetFrame, frameNumbers, + ); + + return { + created: { shapes: propagatedShapes, tags: [], tracks: [] }, + deleted: { shapes: [], tags: [], tracks: [] }, + }; + } + + public applyFilter(input: CollectionActionInput): CollectionActionInput['collection'] { + return { + shapes: input.collection.shapes.filter((shape) => shape.frame === input.frameData.number), + tags: [], + tracks: [], + }; + } + + public isApplicableForObject(objectState: ObjectState): boolean { + return objectState.objectType === ObjectType.SHAPE; + } + + public get name(): string { + return 'Propagate shapes'; + } + + public get parameters(): ActionParameters | null { + return { + 'Target frame': { + type: ActionParameterType.NUMBER, + values: ({ instance }) => { + if (instance instanceof Job) { + return [instance.startFrame, instance.stopFrame, 1].map((val) => val.toString()); + } + return [0, instance.size - 1, 1].map((val) => val.toString()); + }, + defaultValue: ({ instance }) => { + if (instance instanceof Job) { + return instance.stopFrame.toString(); + } + return (instance.size - 1).toString(); + }, + }, + }; + } +} diff --git a/cvat-core/src/annotations-actions/remove-filtered-shapes.ts new file mode 100644 index 000000000000..ab2a30964fad --- /dev/null +++ b/cvat-core/src/annotations-actions/remove-filtered-shapes.ts @@ -0,0 +1,41 @@ +// Copyright (C) 2024 CVAT.ai Corporation +// +// SPDX-License-Identifier: MIT + +import { BaseShapesAction, ShapesActionInput, ShapesActionOutput } from './base-shapes-action'; +import { ActionParameters } from './base-action'; + +export class RemoveFilteredShapes extends BaseShapesAction { + public async init(): Promise<void> { + // nothing to init + } + + public async destroy(): Promise<void> { + // nothing to destroy + } + + public async run(input: ShapesActionInput): Promise<ShapesActionOutput> { + return { + created: { shapes: [] }, + deleted: input.collection, + }; + } + + public applyFilter(input: ShapesActionInput): ShapesActionInput['collection'] { + const { collection } = input; + return collection; + } + + public isApplicableForObject(): boolean { + // the remove action does not make sense when running on a single object + return false; + } + + public get name(): string { + return 'Remove filtered shapes'; + } + + public get parameters(): ActionParameters | null { + return null; + } +} diff --git a/cvat-core/src/annotations-collection.ts index 291fcc6c3e97..25496dfe69a7 100644 --- a/cvat-core/src/annotations-collection.ts +++ b/cvat-core/src/annotations-collection.ts @@ -157,9 +157,68 @@ export default class Collection { return result; } - public export(): Omit<SerializedCollection, 'version'> { + public commit( + appended: Omit<SerializedCollection, 'version'>, + removed: Omit<SerializedCollection, 'version'>, + frame: number, + ): { tags: 
Tag[]; shapes: Shape[]; tracks: Track[]; } { + const isCollectionConsistent = [].concat(removed.shapes, removed.tags, removed.tracks) + .every((object) => typeof object.clientID === 'number' && + Object.prototype.hasOwnProperty.call(this.objects, object.clientID)); + + if (!isCollectionConsistent) { + throw new ArgumentError('Objects required to be deleted were not found in the collection'); + } + + const removedCollection: (Shape | Tag | Track)[] = [].concat(removed.shapes, removed.tags, removed.tracks) + .map((object) => this.objects[object.clientID as number]); + + const imported = this.import(appended); + const appendedCollection = ([] as (Shape | Tag | Track)[]) + .concat(imported.shapes, imported.tags, imported.tracks); + if (!(appendedCollection.length > 0 || removedCollection.length > 0)) { + // nothing to commit + return; + } + + let prevRemoved = []; + removedCollection.forEach((collectionObject) => { + prevRemoved.push(collectionObject.removed); + collectionObject.removed = true; + }); + + this.history.do( + HistoryActions.COMMIT_ANNOTATIONS, + () => { + removedCollection.forEach((collectionObject, idx) => { + collectionObject.removed = prevRemoved[idx]; + }); + prevRemoved = []; + appendedCollection.forEach((collectionObject) => { + collectionObject.removed = true; + }); + }, + () => { + removedCollection.forEach((collectionObject) => { + prevRemoved.push(collectionObject.removed); + collectionObject.removed = true; + }); + appendedCollection.forEach((collectionObject) => { + collectionObject.removed = false; + }); + }, + [].concat( + removedCollection.map((object) => object.clientID), + appendedCollection.map((object) => object.clientID), + ), + frame, + ); + } + + public export(): Pick { const data = { - tracks: this.tracks.filter((track) => !track.removed).map((track) => track.toJSON() as SerializedTrack), + tracks: this.tracks.filter((track) => !track.removed) + .map((track) => track.toJSON() as SerializedTrack), shapes: Object.values(this.shapes) .reduce((accumulator, frameShapes) => { accumulator.push(...frameShapes); @@ -201,7 +260,7 @@ export default class Collection { } const objectStates = []; - const filtered = this.annotationsFilter.filter(visible, filters); + const filtered = this.annotationsFilter.filterSerializedObjectStates(visible, filters); visible.forEach((stateData) => { if (!filters.length || filtered.includes(stateData.clientID)) { @@ -1295,7 +1354,7 @@ export default class Collection { const predicate = sign > 0 ? (frame) => frame <= frameTo : (frame) => frame >= frameTo; const update = sign > 0 ? 
(frame) => frame + 1 : (frame) => frame - 1; - // if not looking for an emty frame nor frame with annotations, return the next frame + // if not looking for an empty frame nor frame with annotations, return the next frame // check if deleted frames are allowed additionally if (!annotationsFilters) { let frame = frameFrom; @@ -1338,7 +1397,7 @@ export default class Collection { statesData.push(...tracks.map((track) => track.get(frame)).filter((state) => !state.outside)); // Filtering - const filtered = this.annotationsFilter.filter(statesData, annotationsFilters); + const filtered = this.annotationsFilter.filterSerializedObjectStates(statesData, annotationsFilters); if (filtered.length) { return frame; } diff --git a/cvat-core/src/annotations-filter.ts b/cvat-core/src/annotations-filter.ts index 58c9e82a63e5..fa7b8e739f5a 100644 --- a/cvat-core/src/annotations-filter.ts +++ b/cvat-core/src/annotations-filter.ts @@ -6,15 +6,74 @@ import jsonLogic from 'json-logic-js'; import { SerializedData } from './object-state'; import { AttributeType, ObjectType, ShapeType } from './enums'; +import { SerializedCollection } from './server-response-types'; +import { Attribute, Label } from './labels'; function adjustName(name): string { return name.replace(/\./g, '\u2219'); } +function getDimensions(points: number[], shapeType: ShapeType): { + width: number | null; + height: number | null; +} { + let [width, height]: (number | null)[] = [null, null]; + if (shapeType === ShapeType.MASK) { + const [xtl, ytl, xbr, ybr] = points.slice(-4); + [width, height] = [xbr - xtl + 1, ybr - ytl + 1]; + } else if (shapeType === ShapeType.ELLIPSE) { + const [cx, cy, rightX, topY] = points; + width = Math.abs(rightX - cx) * 2; + height = Math.abs(cy - topY) * 2; + } else { + let xtl = Number.MAX_SAFE_INTEGER; + let xbr = Number.MIN_SAFE_INTEGER; + let ytl = Number.MAX_SAFE_INTEGER; + let ybr = Number.MIN_SAFE_INTEGER; + + points.forEach((coord, idx) => { + if (idx % 2) { + // y + ytl = Math.min(ytl, coord); + ybr = Math.max(ybr, coord); + } else { + // x + xtl = Math.min(xtl, coord); + xbr = Math.max(xbr, coord); + } + }); + [width, height] = [xbr - xtl, ybr - ytl]; + } + + return { + width, + height, + }; +} + +function convertAttribute(id: number, value: string, attributesSpec: Record): [ + string, + number | boolean | string, +] { + const spec = attributesSpec[id]; + const name = adjustName(spec.name); + if (spec.inputType === AttributeType.NUMBER) { + return [name, +value]; + } + + if (spec.inputType === AttributeType.CHECKBOX) { + return [name, value === 'true']; + } + + return [name, value]; +} + +type ConvertedAttributes = Record; + interface ConvertedObjectData { width: number | null; height: number | null; - attr: Record>; + attr: Record; label: string; serverID: number; objectID: number; @@ -24,7 +83,7 @@ interface ConvertedObjectData { } export default class AnnotationsFilter { - _convertObjects(statesData: SerializedData[]): ConvertedObjectData[] { + private _convertSerializedObjectStates(statesData: SerializedData[]): ConvertedObjectData[] { const objects = statesData.map((state) => { const labelAttributes = state.label.attributes.reduce((acc, attr) => { acc[attr.id] = attr; @@ -33,50 +92,26 @@ export default class AnnotationsFilter { let [width, height]: (number | null)[] = [null, null]; if (state.objectType !== ObjectType.TAG) { - if (state.shapeType === ShapeType.MASK) { - const [xtl, ytl, xbr, ybr] = state.points.slice(-4); - [width, height] = [xbr - xtl + 1, ybr - ytl + 1]; - } else { - let xtl = 
Number.MAX_SAFE_INTEGER; - let xbr = Number.MIN_SAFE_INTEGER; - let ytl = Number.MAX_SAFE_INTEGER; - let ybr = Number.MIN_SAFE_INTEGER; - - const points = state.points || state.elements.reduce((acc, val) => { - acc.push(val.points); - return acc; - }, []).flat(); - points.forEach((coord, idx) => { - if (idx % 2) { - // y - ytl = Math.min(ytl, coord); - ybr = Math.max(ybr, coord); - } else { - // x - xtl = Math.min(xtl, coord); - xbr = Math.max(xbr, coord); - } - }); - [width, height] = [xbr - xtl, ybr - ytl]; - } + const points = state.shapeType === ShapeType.SKELETON ? state.elements.reduce((acc, val) => { + acc.push(val.points); + return acc; + }, []).flat() : state.points; + + ({ width, height } = getDimensions(points, state.shapeType as ShapeType)); } - const attributes = Object.keys(state.attributes).reduce>((acc, key) => { - const attr = labelAttributes[key]; - let value = state.attributes[key]; - if (attr.inputType === AttributeType.NUMBER) { - value = +value; - } else if (attr.inputType === AttributeType.CHECKBOX) { - value = value === 'true'; - } - acc[adjustName(attr.name)] = value; + const attributes = Object.keys(state.attributes).reduce((acc, key) => { + const [name, value] = convertAttribute(+key, state.attributes[key], labelAttributes); + acc[name] = value; return acc; - }, {}); + }, {} as Record); return { width, height, - attr: Object.fromEntries([[adjustName(state.label.name), attributes]]), + attr: { + [adjustName(state.label.name)]: attributes, + }, label: state.label.name, serverID: state.serverID, objectID: state.clientID, @@ -89,11 +124,119 @@ export default class AnnotationsFilter { return objects; } - filter(statesData: SerializedData[], filters: object[]): number[] { - if (!filters.length) return statesData.map((stateData): number => stateData.clientID); - const converted = this._convertObjects(statesData); + private _convertSerializedCollection( + collection: Omit, + labelsSpec: Label[], + ): { shapes: ConvertedObjectData[]; tags: ConvertedObjectData[]; tracks: ConvertedObjectData[]; } { + const labelByID = labelsSpec.reduce>((acc, label) => ({ + [label.id]: label, + ...acc, + }), {}); + + const attributeById = labelsSpec.map((label) => label.attributes).flat().reduce((acc, attribute) => ({ + ...acc, + [attribute.id]: attribute, + }), {} as Record); + + const convertAttributes = ( + attributes: SerializedCollection['shapes'][0]['attributes'], + ): ConvertedAttributes => attributes.reduce((acc, { spec_id, value }) => { + const [name, adjustedValue] = convertAttribute(spec_id, value, attributeById); + acc[name] = adjustedValue; + return acc; + }, {} as Record); + + return { + shapes: collection.shapes.map((shape) => { + const label = labelByID[shape.label_id]; + const points = shape.type === ShapeType.SKELETON ? + shape.elements.map((el) => el.points).flat() : shape.points; + let [width, height]: (number | null)[] = [null, null]; + ({ width, height } = getDimensions(points, shape.type)); + + return { + width, + height, + attr: { + [adjustName(label.name)]: convertAttributes(shape.attributes), + }, + label: label.name, + serverID: shape.id ?? null, + type: ObjectType.SHAPE, + shape: shape.type, + occluded: shape.occluded, + objectID: shape.clientID ?? null, + }; + }), + tags: collection.tags.map((tag) => { + const label = labelByID[tag.label_id]; + + return { + width: null, + height: null, + attr: { + [adjustName(label.name)]: convertAttributes(tag.attributes), + }, + label: labelByID[tag.label_id]?.name ?? null, + serverID: tag.id ?? 
null, + type: ObjectType.TAG, + shape: null, + occluded: false, + objectID: tag.clientID ?? null, + }; + }), + tracks: collection.tracks.map((track) => { + const label = labelByID[track.label_id]; + + return { + width: null, + height: null, + attr: { + [adjustName(label.name)]: convertAttributes(track.attributes), + }, + label: labelByID[track.label_id]?.name ?? null, + serverID: track.id, + type: ObjectType.TRACK, + shape: track.shapes[0]?.type ?? null, + occluded: null, + objectID: track.clientID ?? null, + }; + }), + }; + } + + public filterSerializedObjectStates(statesData: SerializedData[], filters: object[]): number[] { + if (!filters.length) { + return statesData.map((stateData): number => stateData.clientID); + } + + const converted = this._convertSerializedObjectStates(statesData); return converted .map((state) => state.objectID) .filter((_, index) => jsonLogic.apply(filters[0], converted[index])); } + + public filterSerializedCollection( + collection: Omit<SerializedCollection, 'version'>, + labelsSpec: Label[], + filters: object[], + ): { shapes: number[]; tags: number[]; tracks: number[]; } { + if (!filters.length) { + return { + shapes: collection.shapes.map((shape) => shape.clientID), + tags: collection.tags.map((tag) => tag.clientID), + tracks: collection.tracks.map((track) => track.clientID), + }; + } + + const converted = this._convertSerializedCollection(collection, labelsSpec); + return { + shapes: converted.shapes.map((shape) => shape.objectID) + .filter((_, index) => jsonLogic.apply(filters[0], converted.shapes[index])), + tags: converted.tags.map((shape) => shape.objectID) + .filter((_, index) => jsonLogic.apply(filters[0], converted.tags[index])), + tracks: converted.tracks.map((shape) => shape.objectID) + .filter((_, index) => jsonLogic.apply(filters[0], converted.tracks[index])), + }; + } } diff --git a/cvat-core/src/annotations-history.ts index 748d55bcf93d..2e59db96ea1f 100644 --- a/cvat-core/src/annotations-history.ts +++ b/cvat-core/src/annotations-history.ts @@ -5,7 +5,7 @@ import { HistoryActions } from './enums'; -const MAX_HISTORY_LENGTH = 128; +const MAX_HISTORY_LENGTH = 32; interface ActionItem { action: HistoryActions; diff --git a/cvat-core/src/annotations-objects.ts index defcf7dbbada..ab7e32de9784 100644 --- a/cvat-core/src/annotations-objects.ts +++ b/cvat-core/src/annotations-objects.ts @@ -150,17 +150,12 @@ class Annotation { injection.groups.max = Math.max(injection.groups.max, this.group); } - protected withContext(frame: number): { - __internal: { - save: (data: ObjectState) => ObjectState; - delete: Annotation['delete']; - }; + // eslint-disable-next-line @typescript-eslint/no-unused-vars + protected withContext(_: number): { + delete: Annotation['delete']; } { return { - __internal: { - save: (this as any).save.bind(this, frame), - delete: this.delete.bind(this), - }, + delete: this.delete.bind(this), }; } @@ -530,6 +525,17 @@ export class Shape extends Drawn { this.zOrder = data.z_order; } + protected withContext(frame: number): ReturnType<Annotation['withContext']> & { + save: (data: ObjectState) => ObjectState; + export: () => SerializedShape; + } { + return { + ...super.withContext(frame), + save: this.save.bind(this, frame), + export: this.toJSON.bind(this) as () => SerializedShape, + }; + } + // Method is used to export data to the server public toJSON(): SerializedShape | SerializedShape['elements'][0] { const result: SerializedShape = { @@ -592,7 +598,7 @@ export class Shape extends Drawn { pinned: this.pinned, 
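// --- Editorial aside (not part of the patch) -----------------------------------
// The hunks below swap the spread `...this.withContext(frame)` for an explicit
// `__internal: this.withContext(frame)` field, and object-state.ts further down
// adds ObjectState.export(), which delegates to the new `__internal.export`
// binding (ultimately toJSON()). A sketch of the resulting call path, assuming
// an opened job:
//
//     const [state] = await job.annotations.get(frame);
//     const serialized = await state.export(); // -> __internal.export() -> toJSON()
//
// --------------------------------------------------------------------------------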
frame, source: this.source, - ...this.withContext(frame), + __internal: this.withContext(frame), }; if (typeof this.outside !== 'undefined') { @@ -838,6 +844,17 @@ export class Track extends Drawn { }, {}); } + protected withContext(frame: number): ReturnType & { + save: (data: ObjectState) => ObjectState; + export: () => SerializedTrack; + } { + return { + ...super.withContext(frame), + save: this.save.bind(this, frame), + export: this.toJSON.bind(this) as () => SerializedTrack, + }; + } + // Method is used to export data to the server public toJSON(): SerializedTrack | SerializedTrack['elements'][0] { const labelAttributes = attrsAsAnObject(this.label.attributes); @@ -931,7 +948,7 @@ export class Track extends Drawn { }, frame, source: this.source, - ...this.withContext(frame), + __internal: this.withContext(frame), }; } @@ -1405,6 +1422,17 @@ export class Track extends Drawn { } export class Tag extends Annotation { + protected withContext(frame: number): ReturnType & { + save: (data: ObjectState) => ObjectState; + export: () => SerializedTag; + } { + return { + ...super.withContext(frame), + save: this.save.bind(this, frame), + export: this.toJSON.bind(this) as () => SerializedTag, + }; + } + // Method is used to export data to the server public toJSON(): SerializedTag { const result: SerializedTag = { @@ -1451,7 +1479,7 @@ export class Tag extends Annotation { updated: this.updated, frame, source: this.source, - ...this.withContext(frame), + __internal: this.withContext(frame), }; } @@ -2022,7 +2050,7 @@ export class SkeletonShape extends Shape { hidden: elements.every((el) => el.hidden), frame, source: this.source, - ...this.withContext(frame), + __internal: this.withContext(frame), }; } @@ -3064,7 +3092,7 @@ export class SkeletonTrack extends Track { occluded: elements.every((el) => el.occluded), lock: elements.every((el) => el.lock), hidden: elements.every((el) => el.hidden), - ...this.withContext(frame), + __internal: this.withContext(frame), }; } diff --git a/cvat-core/src/api-implementation.ts b/cvat-core/src/api-implementation.ts index 0e9f400ad499..c9e53a2e1e0d 100644 --- a/cvat-core/src/api-implementation.ts +++ b/cvat-core/src/api-implementation.ts @@ -39,7 +39,9 @@ import QualityConflict, { ConflictSeverity } from './quality-conflict'; import QualitySettings from './quality-settings'; import { getFramesMeta } from './frames'; import AnalyticsReport from './analytics-report'; -import { listActions, registerAction, runActions } from './annotations-actions'; +import { + callAction, listActions, registerAction, runAction, +} from './annotations-actions/annotations-actions'; import { convertDescriptions, getServerAPISchema } from './server-schema'; import { JobType } from './enums'; import { PaginatedResource } from './core-types'; @@ -54,7 +56,8 @@ export default function implementAPI(cvat: CVATCore): CVATCore { implementationMixin(cvat.plugins.register, PluginRegistry.register.bind(cvat)); implementationMixin(cvat.actions.list, listActions); implementationMixin(cvat.actions.register, registerAction); - implementationMixin(cvat.actions.run, runActions); + implementationMixin(cvat.actions.run, runAction); + implementationMixin(cvat.actions.call, callAction); implementationMixin(cvat.lambda.list, lambdaManager.list.bind(lambdaManager)); implementationMixin(cvat.lambda.run, lambdaManager.run.bind(lambdaManager)); diff --git a/cvat-core/src/api.ts b/cvat-core/src/api.ts index 5f624ad0e8ae..60de43fd4b18 100644 --- a/cvat-core/src/api.ts +++ b/cvat-core/src/api.ts @@ -21,12 +21,14 @@ 
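[Editorial aside, not part of the patch] The api.ts hunk that follows wires runAction/callAction into the public wrapper API. A hypothetical usage sketch, assuming `cvat` is the built core instance and `job` an opened Job; the action name and the 'Target frame' parameter come from PropagateShapes above:

const action = (await cvat.actions.list()).find((candidate) => candidate.name === 'Propagate shapes');

// apply over a frame range; [] means no extra annotation filters
await cvat.actions.run(
    job, action, { 'Target frame': `${job.stopFrame}` },
    job.startFrame, job.stopFrame, [],
    (message, progress) => console.log(message, progress),
    () => false, // cancelled() is polled by the runner
);

// or apply to explicitly selected object states on a single frame
const states = await job.annotations.get(frame);
await cvat.actions.call(
    job, action, { 'Target frame': `${job.stopFrame}` }, frame, states,
    (message, progress) => console.log(message, progress),
    () => false,
);

[End of aside]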
import CloudStorage from './cloud-storage'; import Organization from './organization'; import Webhook from './webhook'; import AnnotationGuide from './guide'; -import BaseSingleFrameAction from './annotations-actions'; +import { BaseAction } from './annotations-actions/base-action'; +import { BaseCollectionAction } from './annotations-actions/base-collection-action'; +import { BaseShapesAction } from './annotations-actions/base-shapes-action'; import QualityReport from './quality-report'; import QualityConflict from './quality-conflict'; import QualitySettings from './quality-settings'; import AnalyticsReport from './analytics-report'; -import ValidationLayout from './validation-layout'; +import { JobValidationLayout, TaskValidationLayout } from './validation-layout'; import { Request } from './request'; import * as enums from './enums'; @@ -191,14 +193,14 @@ function build(): CVATCore { const result = await PluginRegistry.apiWrapper(cvat.actions.list); return result; }, - async register(action: BaseSingleFrameAction) { + async register(action: BaseAction) { const result = await PluginRegistry.apiWrapper(cvat.actions.register, action); return result; }, async run( instance: Job | Task, - actionsChain: BaseSingleFrameAction[], - actionsParameters: Record[], + actions: BaseAction, + actionsParameters: Record, frameFrom: number, frameTo: number, filters: string[], @@ -211,7 +213,7 @@ function build(): CVATCore { const result = await PluginRegistry.apiWrapper( cvat.actions.run, instance, - actionsChain, + actions, actionsParameters, frameFrom, frameTo, @@ -221,6 +223,30 @@ function build(): CVATCore { ); return result; }, + async call( + instance: Job | Task, + actions: BaseAction, + actionsParameters: Record, + frame: number, + states: ObjectState[], + onProgress: ( + message: string, + progress: number, + ) => void, + cancelled: () => boolean, + ) { + const result = await PluginRegistry.apiWrapper( + cvat.actions.call, + instance, + actions, + actionsParameters, + frame, + states, + onProgress, + cancelled, + ); + return result; + }, }, lambda: { async list() { @@ -294,6 +320,12 @@ function build(): CVATCore { set requestsStatusDelay(value) { config.requestsStatusDelay = value; }, + get jobMetaDataReloadPeriod() { + return config.jobMetaDataReloadPeriod; + }, + set jobMetaDataReloadPeriod(value) { + config.jobMetaDataReloadPeriod = value; + }, }, client: { version: `${pjson.version}`, @@ -420,14 +452,16 @@ function build(): CVATCore { Organization, Webhook, AnnotationGuide, - BaseSingleFrameAction, + BaseShapesAction, + BaseCollectionAction, QualitySettings, AnalyticsReport, QualityConflict, QualityReport, Request, FramesMetaData, - ValidationLayout, + JobValidationLayout, + TaskValidationLayout, }, utils: { mask2Rle, diff --git a/cvat-core/src/cloud-storage.ts b/cvat-core/src/cloud-storage.ts index e4e4fb0e5d23..1e7cdeb8d7f7 100644 --- a/cvat-core/src/cloud-storage.ts +++ b/cvat-core/src/cloud-storage.ts @@ -290,7 +290,7 @@ Object.defineProperties(CloudStorage.prototype.save, { } // update if (typeof this.id !== 'undefined') { - // provider_type and recource should not change; + // provider_type and resource should not change; // send to the server only the values that have changed const initialData: SerializedCloudStorage = {}; if (this.displayName) { diff --git a/cvat-core/src/config.ts b/cvat-core/src/config.ts index 99d76a723655..eefb535814bb 100644 --- a/cvat-core/src/config.ts +++ b/cvat-core/src/config.ts @@ -19,6 +19,8 @@ const config = { globalObjectsCounter: 0, 
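// --- Editorial aside (not part of the patch) -----------------------------------
// `jobMetaDataReloadPeriod`, added just below, controls how long cached job meta
// is trusted before frames.ts refetches it (see the frames.ts hunk replacing the
// hard-coded META_DATA_RELOAD_PERIOD). A host application could tune it, e.g.:
//
//     cvat.config.jobMetaDataReloadPeriod = 10 * 60 * 1000; // 10 minutes
//
// --------------------------------------------------------------------------------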
requestsStatusDelay: null, + + jobMetaDataReloadPeriod: 1 * 60 * 60 * 1000, // 1 hour }; export default config; diff --git a/cvat-core/src/core-types.ts b/cvat-core/src/core-types.ts index e44a354cb5bd..c05b7b6ba4a5 100644 --- a/cvat-core/src/core-types.ts +++ b/cvat-core/src/core-types.ts @@ -2,7 +2,9 @@ // // SPDX-License-Identifier: MIT -import { ModelKind, ModelReturnType, ShapeType } from './enums'; +import { + ModelKind, ModelReturnType, RQStatus, ShapeType, +} from './enums'; export interface ModelAttribute { name: string; @@ -54,4 +56,10 @@ export interface SerializedModel { updated_date?: string; } +export interface UpdateStatusData { + status: RQStatus; + progress: number; + message: string; +} + export type PaginatedResource = T[] & { count: number }; diff --git a/cvat-core/src/enums.ts b/cvat-core/src/enums.ts index 1b291662d213..25fdf815fa20 100644 --- a/cvat-core/src/enums.ts +++ b/cvat-core/src/enums.ts @@ -148,6 +148,7 @@ export enum HistoryActions { REMOVED_OBJECT = 'Removed object', REMOVED_FRAME = 'Removed frame', RESTORED_FRAME = 'Restored frame', + COMMIT_ANNOTATIONS = 'Commit annotations', } export enum ModelKind { diff --git a/cvat-core/src/frames.ts b/cvat-core/src/frames.ts index 1192058c11b3..3305edfc5aab 100644 --- a/cvat-core/src/frames.ts +++ b/cvat-core/src/frames.ts @@ -12,6 +12,7 @@ import serverProxy from './server-proxy'; import { SerializedFramesMetaData } from './server-response-types'; import { Exception, ArgumentError, DataError } from './exceptions'; import { FieldUpdateTrigger } from './common'; +import config from './config'; // frame storage by job id const frameDataCache: Record this.getJobRelativeFrameNumber(frame) + jobStartFrame); + } + getDataFrameNumbers(): number[] { if (this.includedFrames) { return [...this.includedFrames]; @@ -348,9 +354,7 @@ Object.defineProperty(FrameData.prototype.data, 'implementation', { const requestId = +_.uniqueId(); const requestedDataFrameNumber = meta.getDataFrameNumber(this.number - jobStartFrame); const chunkIndex = meta.getFrameChunkIndex(requestedDataFrameNumber); - const segmentFrameNumbers = meta.getDataFrameNumbers().map((dataFrameNumber: number) => ( - meta.getJobRelativeFrameNumber(dataFrameNumber) + jobStartFrame - )); + const segmentFrameNumbers = meta.getSegmentFrameNumbers(jobStartFrame); const frame = provider.frame(this.number); function findTheNextNotDecodedChunk(currentFrameIndex: number): number | null { @@ -532,41 +536,55 @@ Object.defineProperty(FrameData.prototype.data, 'implementation', { writable: false, }); -export async function getFramesMeta(type: 'job' | 'task', id: number, forceReload = false): Promise { +export function getFramesMeta(type: 'job' | 'task', id: number, forceReload = false): Promise { if (type === 'task') { // we do not cache task meta currently. 
So, each new call will results to the server request - const result = await serverProxy.frames.getMeta('task', id); - return new FramesMetaData({ - ...result, - deleted_frames: Object.fromEntries(result.deleted_frames.map((_frame) => [_frame, true])), - }); + return serverProxy.frames.getMeta('task', id).then((serialized) => ( + new FramesMetaData({ + ...serialized, + deleted_frames: Object.fromEntries(serialized.deleted_frames.map((_frame) => [_frame, true])), + }) + )); } + if (!(id in frameMetaCache) || forceReload) { - frameMetaCache[id] = serverProxy.frames.getMeta('job', id) - .then((serverMeta) => new FramesMetaData({ - ...serverMeta, - deleted_frames: Object.fromEntries(serverMeta.deleted_frames.map((_frame) => [_frame, true])), - })) - .catch((error) => { + const previousCache = frameMetaCache[id]; + frameMetaCache[id] = new Promise((resolve, reject) => { + serverProxy.frames.getMeta('job', id).then((serialized) => { + const framesMetaData = new FramesMetaData({ + ...serialized, + deleted_frames: Object.fromEntries(serialized.deleted_frames.map((_frame) => [_frame, true])), + }); + resolve(framesMetaData); + }).catch((error: unknown) => { delete frameMetaCache[id]; - throw error; + if (previousCache instanceof Promise) { + frameMetaCache[id] = previousCache; + } + reject(error); }); + }); } + return frameMetaCache[id]; } -async function saveJobMeta(meta: FramesMetaData, jobID: number): Promise { - frameMetaCache[jobID] = serverProxy.frames.saveMeta('job', jobID, { - deleted_frames: Object.keys(meta.deletedFrames).map((frame) => +frame), - }) - .then((serverMeta) => new FramesMetaData({ - ...serverMeta, - deleted_frames: Object.fromEntries(serverMeta.deleted_frames.map((_frame) => [_frame, true])), - })) - .catch((error) => { - delete frameMetaCache[jobID]; - throw error; +function saveJobMeta(meta: FramesMetaData, jobID: number): Promise { + frameMetaCache[jobID] = new Promise((resolve, reject) => { + serverProxy.frames.saveMeta('job', jobID, { + deleted_frames: Object.keys(meta.deletedFrames).map((frame) => +frame), + }).then((serverMeta) => { + const updatedMetaData = new FramesMetaData({ + ...serverMeta, + deleted_frames: Object.fromEntries(serverMeta.deleted_frames.map((_frame) => [_frame, true])), + }); + resolve(updatedMetaData); + }).catch((error) => { + frameMetaCache[jobID] = Promise.resolve(meta); + reject(error); }); + }); + return frameMetaCache[jobID]; } @@ -594,8 +612,7 @@ async function refreshJobCacheIfOutdated(jobID: number): Promise { throw new Error('Frame data cache is abscent'); } - const META_DATA_RELOAD_PERIOD = 1 * 60 * 60 * 1000; // 1 hour - const isOutdated = (Date.now() - cached.metaFetchedTimestamp) > META_DATA_RELOAD_PERIOD; + const isOutdated = (Date.now() - cached.metaFetchedTimestamp) > config.jobMetaDataReloadPeriod; if (isOutdated) { // get metadata again if outdated @@ -683,7 +700,7 @@ export function getContextImage(jobID: number, frame: number): Promise setTimeout(checkAndExecute)); } else { @@ -775,7 +792,7 @@ export async function getFrame( // - getContextImage // - getCachedChunks // And from this idea we should call refreshJobCacheIfOutdated from each one - // Hovewer, following from the order, these methods are usually called + // However, following from the order, these methods are usually called // it may lead to even more confusing behaviour // // Usually user first receives frame, then user receives ranges and finally user receives context images @@ -831,7 +848,7 @@ export async function patchMeta(jobID: number): Promise { const 
updatedFields = meta.getUpdated(); if (Object.keys(updatedFields).length) { - frameMetaCache[jobID] = saveJobMeta(meta, jobID); + await saveJobMeta(meta, jobID); } const newMeta = await frameMetaCache[jobID]; return newMeta; @@ -889,9 +906,7 @@ export function getJobFrameNumbers(jobID: number): number[] { } const { meta, jobStartFrame } = frameDataCache[jobID]; - return meta.getDataFrameNumbers().map((dataFrameNumber: number): number => ( - meta.getJobRelativeFrameNumber(dataFrameNumber) + jobStartFrame - )); + return meta.getSegmentFrameNumbers(jobStartFrame); } export function clear(jobID: number): void { diff --git a/cvat-core/src/index.ts b/cvat-core/src/index.ts index f361f194df73..4eff35601f70 100644 --- a/cvat-core/src/index.ts +++ b/cvat-core/src/index.ts @@ -32,9 +32,16 @@ import QualityConflict from './quality-conflict'; import QualitySettings from './quality-settings'; import AnalyticsReport from './analytics-report'; import AnnotationGuide from './guide'; -import ValidationLayout from './validation-layout'; +import { JobValidationLayout, TaskValidationLayout } from './validation-layout'; import { Request } from './request'; -import BaseSingleFrameAction, { listActions, registerAction, runActions } from './annotations-actions'; +import { + runAction, + callAction, + listActions, + registerAction, +} from './annotations-actions/annotations-actions'; +import { BaseCollectionAction } from './annotations-actions/base-collection-action'; +import { BaseShapesAction } from './annotations-actions/base-shapes-action'; import { ArgumentError, DataError, Exception, ScriptingError, ServerError, } from './exceptions'; @@ -165,7 +172,8 @@ export default interface CVATCore { actions: { list: typeof listActions; register: typeof registerAction; - run: typeof runActions; + run: typeof runAction; + call: typeof callAction; }; logger: typeof logger; config: { @@ -179,6 +187,7 @@ export default interface CVATCore { onOrganizationChange: (newOrgId: number | null) => void | null; globalObjectsCounter: typeof config.globalObjectsCounter; requestsStatusDelay: typeof config.requestsStatusDelay; + jobMetaDataReloadPeriod: typeof config.jobMetaDataReloadPeriod; }, client: { version: string; @@ -209,14 +218,16 @@ export default interface CVATCore { Organization: typeof Organization; Webhook: typeof Webhook; AnnotationGuide: typeof AnnotationGuide; - BaseSingleFrameAction: typeof BaseSingleFrameAction; + BaseShapesAction: typeof BaseShapesAction; + BaseCollectionAction: typeof BaseCollectionAction; QualityReport: typeof QualityReport; QualityConflict: typeof QualityConflict; QualitySettings: typeof QualitySettings; AnalyticsReport: typeof AnalyticsReport; Request: typeof Request; FramesMetaData: typeof FramesMetaData; - ValidationLayout: typeof ValidationLayout; + JobValidationLayout: typeof JobValidationLayout; + TaskValidationLayout: typeof TaskValidationLayout; }; utils: { mask2Rle: typeof mask2Rle; diff --git a/cvat-core/src/lambda-manager.ts b/cvat-core/src/lambda-manager.ts index 66733d7ed236..cfed3d474329 100644 --- a/cvat-core/src/lambda-manager.ts +++ b/cvat-core/src/lambda-manager.ts @@ -8,12 +8,6 @@ import { ArgumentError } from './exceptions'; import MLModel from './ml-model'; import { RQStatus, ShapeType } from './enums'; -export interface ModelProvider { - name: string; - icon: string; - attributes: Record; -} - export interface InteractorResults { mask: number[][]; points?: [number, number][]; diff --git a/cvat-core/src/object-state.ts b/cvat-core/src/object-state.ts index 
9b35736a08a1..28993a0d114c 100644 --- a/cvat-core/src/object-state.ts +++ b/cvat-core/src/object-state.ts @@ -1,5 +1,5 @@ // Copyright (C) 2019-2022 Intel Corporation -// Copyright (C) 2022-2023 CVAT.ai Corporation +// Copyright (C) 2022-2024 CVAT.ai Corporation // // SPDX-License-Identifier: MIT @@ -8,6 +8,7 @@ import PluginRegistry from './plugins'; import { ArgumentError } from './exceptions'; import { Label } from './labels'; import { isEnum } from './common'; +import { SerializedShape, SerializedTag, SerializedTrack } from './server-response-types'; interface UpdateFlags { label: boolean; @@ -516,10 +517,15 @@ export default class ObjectState { const result = await PluginRegistry.apiWrapper.call(this, ObjectState.prototype.delete, frame, force); return result; } + + async export(): Promise { + const result = await PluginRegistry.apiWrapper.call(this, ObjectState.prototype.export); + return result; + } } Object.defineProperty(ObjectState.prototype.save, 'implementation', { - value: function save(): ObjectState { + value: function saveImplementation(): ObjectState { if (this.__internal && this.__internal.save) { return this.__internal.save(this); } @@ -529,8 +535,19 @@ Object.defineProperty(ObjectState.prototype.save, 'implementation', { writable: false, }); +Object.defineProperty(ObjectState.prototype.export, 'implementation', { + value: function exportImplementation(): ObjectState { + if (this.__internal && this.__internal.export) { + return this.__internal.export(this); + } + + return this; + }, + writable: false, +}); + Object.defineProperty(ObjectState.prototype.delete, 'implementation', { - value: function remove(frame: number, force: boolean): boolean { + value: function deleteImplementation(frame: number, force: boolean): boolean { if (this.__internal && this.__internal.delete) { if (!Number.isInteger(+frame) || +frame < 0) { throw new ArgumentError('Frame argument must be a non negative integer'); diff --git a/cvat-core/src/object-utils.ts b/cvat-core/src/object-utils.ts index 0c4a3e5d8143..b3592e5cbe1c 100644 --- a/cvat-core/src/object-utils.ts +++ b/cvat-core/src/object-utils.ts @@ -60,7 +60,7 @@ export function findAngleDiff(rightAngle: number, leftAngle: number): number { angleDiff = ((angleDiff + 180) % 360) - 180; if (Math.abs(angleDiff) >= 180) { // if the main arc is bigger than 180, go another arc - // to find it, just substract absolute value from 360 and inverse sign + // to find it, just subtract absolute value from 360 and inverse sign angleDiff = 360 - Math.abs(angleDiff) * Math.sign(angleDiff) * -1; } return angleDiff; @@ -360,7 +360,7 @@ export function rle2Mask(rle: number[], width: number, height: number): number[] } export function propagateShapes( - shapes: T[], from: number, to: number, + shapes: T[], from: number, to: number, frameNumbers: number[], ): T[] { const getCopy = (shape: T): SerializedShape | SerializedData => { if (shape instanceof ObjectState) { @@ -397,9 +397,18 @@ export function propagateShapes( }; }; + const targetFrameNumbers = frameNumbers.filter( + (frameNumber: number) => frameNumber >= Math.min(from, to) && + frameNumber <= Math.max(from, to) && + frameNumber !== from, + ); + const states: T[] = []; - const sign = Math.sign(to - from); - for (let frame = from + sign; sign > 0 ? 
frame <= to : frame >= to; frame += sign) { + for (const frame of targetFrameNumbers) { + if (frame === from) { + continue; + } + for (const shape of shapes) { const copy = getCopy(shape); diff --git a/cvat-core/src/quality-settings.ts b/cvat-core/src/quality-settings.ts index c5e3ea6974c2..7c591e371cc4 100644 --- a/cvat-core/src/quality-settings.ts +++ b/cvat-core/src/quality-settings.ts @@ -14,6 +14,11 @@ export enum TargetMetric { RECALL = 'recall', } +export enum PointSizeBase { + IMAGE_SIZE = 'image_size', + GROUP_BBOX_SIZE = 'group_bbox_size', +} + export default class QualitySettings { #id: number; #targetMetric: TargetMetric; @@ -22,6 +27,7 @@ export default class QualitySettings { #task: number; #iouThreshold: number; #oksSigma: number; + #pointSizeBase: PointSizeBase; #lineThickness: number; #lowOverlapThreshold: number; #orientedLines: boolean; @@ -32,6 +38,7 @@ export default class QualitySettings { #objectVisibilityThreshold: number; #panopticComparison: boolean; #compareAttributes: boolean; + #matchEmptyFrames: boolean; #descriptions: Record; constructor(initialData: SerializedQualitySettingsData) { @@ -42,6 +49,7 @@ export default class QualitySettings { this.#maxValidationsPerJob = initialData.max_validations_per_job; this.#iouThreshold = initialData.iou_threshold; this.#oksSigma = initialData.oks_sigma; + this.#pointSizeBase = initialData.point_size_base as PointSizeBase; this.#lineThickness = initialData.line_thickness; this.#lowOverlapThreshold = initialData.low_overlap_threshold; this.#orientedLines = initialData.compare_line_orientation; @@ -52,6 +60,7 @@ export default class QualitySettings { this.#objectVisibilityThreshold = initialData.object_visibility_threshold; this.#panopticComparison = initialData.panoptic_comparison; this.#compareAttributes = initialData.compare_attributes; + this.#matchEmptyFrames = initialData.match_empty_frames; this.#descriptions = initialData.descriptions; } @@ -79,6 +88,14 @@ export default class QualitySettings { this.#oksSigma = newVal; } + get pointSizeBase(): PointSizeBase { + return this.#pointSizeBase; + } + + set pointSizeBase(newVal: PointSizeBase) { + this.#pointSizeBase = newVal; + } + get lineThickness(): number { return this.#lineThickness; } @@ -183,6 +200,14 @@ export default class QualitySettings { this.#maxValidationsPerJob = newVal; } + get matchEmptyFrames(): boolean { + return this.#matchEmptyFrames; + } + + set matchEmptyFrames(newVal: boolean) { + this.#matchEmptyFrames = newVal; + } + get descriptions(): Record { const descriptions: Record = Object.keys(this.#descriptions).reduce((acc, key) => { const camelCaseKey = _.camelCase(key); @@ -197,6 +222,7 @@ export default class QualitySettings { const result: SerializedQualitySettingsData = { iou_threshold: this.#iouThreshold, oks_sigma: this.#oksSigma, + point_size_base: this.#pointSizeBase, line_thickness: this.#lineThickness, low_overlap_threshold: this.#lowOverlapThreshold, compare_line_orientation: this.#orientedLines, @@ -210,6 +236,7 @@ export default class QualitySettings { target_metric: this.#targetMetric, target_metric_threshold: this.#targetMetricThreshold, max_validations_per_job: this.#maxValidationsPerJob, + match_empty_frames: this.#matchEmptyFrames, }; return result; diff --git a/cvat-core/src/request.ts b/cvat-core/src/request.ts index 66ae49b4c96b..ad8aa04d45aa 100644 --- a/cvat-core/src/request.ts +++ b/cvat-core/src/request.ts @@ -6,10 +6,10 @@ import { RQStatus } from './enums'; import User from './user'; import { SerializedRequest } from 
'./server-response-types'; -type Operation = { +export type RequestOperation = { target: string; type: string; - format: string; + format: string | null; jobID: number | null; taskID: number | null; projectID: number | null; @@ -44,9 +44,7 @@ export class Request { this.#finishedDate = initialData.finished_date; this.#expiryDate = initialData.expiry_date; - if (initialData.owner) { - this.#owner = new User(initialData.owner); - } + this.#owner = new User(initialData.owner); } get id(): string { @@ -57,7 +55,8 @@ export class Request { return this.#status.toLowerCase() as RQStatus; } - get progress(): number { + // The `progress` represents a value between 0 and 1 + get progress(): number | undefined { return this.#progress; } @@ -65,7 +64,7 @@ export class Request { return this.#message; } - get operation(): Operation { + get operation(): RequestOperation { return { target: this.#operation.target, type: this.#operation.type, @@ -77,11 +76,11 @@ export class Request { }; } - get url(): string { + get url(): string | undefined { return this.#resultUrl; } - get resultID(): number { + get resultID(): number | undefined { return this.#resultID; } @@ -89,19 +88,49 @@ export class Request { return this.#createdDate; } - get startedDate(): string { + get startedDate(): string | undefined { return this.#startedDate; } - get finishedDate(): string { + get finishedDate(): string | undefined { return this.#finishedDate; } - get expiryDate(): string { + get expiryDate(): string | undefined { return this.#expiryDate; } get owner(): User { return this.#owner; } + + public toJSON(): SerializedRequest { + const result: SerializedRequest = { + id: this.#id, + status: this.#status, + operation: { + target: this.#operation.target, + type: this.#operation.type, + format: this.#operation.format, + job_id: this.#operation.job_id, + task_id: this.#operation.task_id, + project_id: this.#operation.project_id, + function_id: this.#operation.function_id, + }, + progress: this.#progress, + message: this.#message, + result_url: this.#resultUrl, + result_id: this.#resultID, + created_date: this.#createdDate, + started_date: this.#startedDate, + finished_date: this.#finishedDate, + expiry_date: this.#expiryDate, + owner: { + id: this.#owner.id, + username: this.#owner.username, + }, + }; + + return result; + } } diff --git a/cvat-core/src/requests-manager.ts b/cvat-core/src/requests-manager.ts index 429c42dba2f3..800b577242c8 100644 --- a/cvat-core/src/requests-manager.ts +++ b/cvat-core/src/requests-manager.ts @@ -34,7 +34,7 @@ class RequestsManager { requestDelayIdx: number | null, request: Request | null, timeout: number | null; - promise?: Promise; + promise: Promise; }>; private requestStack: number[]; @@ -71,10 +71,11 @@ class RequestsManager { } return this.listening[requestID].promise; } + const promise = new Promise((resolve, reject) => { const timeoutCallback = async (): Promise => { // We make sure that no more than REQUESTS_COUNT requests are sent simultaneously - // If thats the case, we re-schedule the timeout + // If that's the case, we re-schedule the timeout const timestamp = Date.now(); if (this.requestStack.length >= REQUESTS_COUNT) { const timestampToCheck = this.requestStack[this.requestStack.length - 1]; @@ -122,35 +123,38 @@ class RequestsManager { } } catch (error) { if (requestID in this.listening) { - const { onUpdate } = this.listening[requestID]; + const { onUpdate, request } = this.listening[requestID]; + if (request) { + onUpdate + .forEach((update) => update(new Request({ + 
...request.toJSON(), + status: RQStatus.FAILED, + message: `Could not get a status of the request ${requestID}. ${error.toString()}`, + }))); + } - onUpdate - .forEach((update) => update(new Request({ - id: requestID, - status: RQStatus.FAILED, - message: `Could not get a status of the request ${requestID}. ${error.toString()}`, - }))); + delete this.listening[requestID]; reject(error); } } }; - if (initialRequest?.status === RQStatus.FAILED) { - reject(new RequestError(initialRequest?.message)); - } else { - this.listening[requestID] = { - onUpdate: callback ? [callback] : [], - timeout: window.setTimeout(timeoutCallback), - request: initialRequest, - requestDelayIdx: 0, - }; - } + Promise.resolve().then(() => { + // running as microtask to make sure "promise" was initialized + if (initialRequest?.status === RQStatus.FAILED) { + reject(new RequestError(initialRequest?.message)); + } else { + this.listening[requestID] = { + onUpdate: callback ? [callback] : [], + timeout: window.setTimeout(timeoutCallback), + request: initialRequest, + requestDelayIdx: 0, + promise, + }; + } + }); }); - this.listening[requestID] = { - ...this.listening[requestID], - promise, - }; return promise; } diff --git a/cvat-core/src/server-proxy.ts b/cvat-core/src/server-proxy.ts index 7e8819808649..37f2337c0e52 100644 --- a/cvat-core/src/server-proxy.ts +++ b/cvat-core/src/server-proxy.ts @@ -19,9 +19,9 @@ import { SerializedInvitationData, SerializedCloudStorage, SerializedFramesMetaData, SerializedCollection, SerializedQualitySettingsData, APIQualitySettingsFilter, SerializedQualityConflictData, APIQualityConflictsFilter, SerializedQualityReportData, APIQualityReportsFilter, SerializedAnalyticsReport, APIAnalyticsReportFilter, - SerializedRequest, SerializedValidationLayout, + SerializedRequest, SerializedJobValidationLayout, SerializedTaskValidationLayout, } from './server-response-types'; -import { PaginatedResource } from './core-types'; +import { PaginatedResource, UpdateStatusData } from './core-types'; import { Request } from './request'; import { Storage } from './storage'; import { SerializedEvent } from './event'; @@ -102,7 +102,7 @@ function fetchAll(url, filter = {}): Promise { } }); - // removing possible dublicates + // removing possible duplicates const obj = result.results.reduce((acc: Record, item: any) => { acc[item.id] = item; return acc; @@ -1069,7 +1069,7 @@ type LongProcessListener = Record, taskDataSpec: any, - onUpdate: (request: Request) => void, + onUpdate: (request: Request | UpdateStatusData) => void, ): Promise<{ taskID: number, rqID: string }> { const { backendAPI, origin } = config; // keep current default params to 'freeze" them during this request @@ -1104,11 +1104,11 @@ async function createTask( let response = null; - onUpdate(new Request({ + onUpdate({ status: RQStatus.UNKNOWN, progress: 0, message: 'CVAT is creating your task', - })); + }); try { response = await Axios.post(`${backendAPI}/tasks`, taskSpec, { @@ -1118,11 +1118,11 @@ async function createTask( throw generateError(errorData); } - onUpdate(new Request({ + onUpdate({ status: RQStatus.UNKNOWN, progress: 0, message: 'CVAT is uploading task data to the server', - })); + }); async function bulkUpload(taskId, files) { const fileBulks = files.reduce((fileGroups, file) => { @@ -1142,11 +1142,11 @@ async function createTask( taskData.append(`client_files[${idx}]`, element); } const percentage = totalSentSize / totalSize; - onUpdate(new Request({ + onUpdate({ status: RQStatus.UNKNOWN, progress: percentage, message: 'CVAT is 
uploading task data to the server', - })); + }); await Axios.post(`${backendAPI}/tasks/${taskId}/data`, taskData, { ...params, headers: { 'Upload-Multiple': true }, @@ -1170,11 +1170,11 @@ async function createTask( const uploadConfig = { endpoint: `${origin}${backendAPI}/tasks/${response.data.id}/data/`, onUpdate: (percentage) => { - onUpdate(new Request({ + onUpdate({ status: RQStatus.UNKNOWN, progress: percentage, message: 'CVAT is uploading task data to the server', - })); + }); }, chunkSize, totalSize, @@ -1384,7 +1384,7 @@ async function deleteJob(jobID: number): Promise { const validationLayout = (instance: 'tasks' | 'jobs') => async ( id: number, -): Promise => { +): Promise => { const { backendAPI } = config; try { @@ -2250,16 +2250,32 @@ async function getRequestsList(): Promise> } } +// Temporary solution for server availability problems +const retryTimeouts = [5000, 10000, 15000]; async function getRequestStatus(rqID: string): Promise { const { backendAPI } = config; + let retryCount = 0; + let lastError = null; - try { - const response = await Axios.get(`${backendAPI}/requests/${rqID}`); + while (retryCount < 3) { + try { + const response = await Axios.get(`${backendAPI}/requests/${rqID}`); - return response.data; - } catch (errorData) { - throw generateError(errorData); + return response.data; + } catch (errorData) { + lastError = generateError(errorData); + const { response } = errorData; + if (response && [502, 503, 504].includes(response.status)) { + const timeout = retryTimeouts[retryCount]; + await new Promise((resolve) => { setTimeout(resolve, timeout); }); + retryCount++; + } else { + throw generateError(errorData); + } + } } + + throw lastError; } async function cancelRequest(requestID): Promise { diff --git a/cvat-core/src/server-response-types.ts b/cvat-core/src/server-response-types.ts index 5dd8cc3d54d2..ea97c0730aaa 100644 --- a/cvat-core/src/server-response-types.ts +++ b/cvat-core/src/server-response-types.ts @@ -47,13 +47,14 @@ export interface SerializedUser { first_name: string; last_name: string; email?: string; - groups?: ('user' | 'business' | 'admin')[]; + groups?: ('user' | 'admin')[]; is_staff?: boolean; is_superuser?: boolean; is_active?: boolean; last_login?: string; date_joined?: string; email_verification_required: boolean; + has_analytics_access: boolean; } interface SerializedStorage { @@ -246,6 +247,7 @@ export interface SerializedQualitySettingsData { max_validations_per_job?: number; iou_threshold?: number; oks_sigma?: number; + point_size_base?: string; line_thickness?: number; low_overlap_threshold?: number; compare_line_orientation?: boolean; @@ -256,6 +258,7 @@ export interface SerializedQualitySettingsData { object_visibility_threshold?: number; panoptic_comparison?: boolean; compare_attributes?: boolean; + match_empty_frames?: boolean; descriptions?: Record; } @@ -503,29 +506,36 @@ export interface SerializedAPISchema { } export interface SerializedRequest { - id?: string; + id: string; + message: string; status: string; - operation?: { + operation: { target: string; type: string; - format: string; + format: string | null; job_id: number | null; task_id: number | null; project_id: number | null; + function_id: string | null; }; progress?: number; - message: string; result_url?: string; result_id?: number; - created_date?: string; + created_date: string; started_date?: string; finished_date?: string; expiry_date?: string; - owner?: any; + owner: any; } -export interface SerializedValidationLayout { +export interface 
SerializedJobValidationLayout { honeypot_count?: number; honeypot_frames?: number[]; honeypot_real_frames?: number[]; } + +export interface SerializedTaskValidationLayout extends SerializedJobValidationLayout { + mode: 'gt' | 'gt_pool' | null; + validation_frames?: number[]; + disabled_frames?: number[]; +} diff --git a/cvat-core/src/session-implementation.ts b/cvat-core/src/session-implementation.ts index 38728a409448..7ea9e326fb8b 100644 --- a/cvat-core/src/session-implementation.ts +++ b/cvat-core/src/session-implementation.ts @@ -27,7 +27,10 @@ import { decodePreview, } from './frames'; import Issue from './issue'; -import { SerializedLabel, SerializedTask, SerializedValidationLayout } from './server-response-types'; +import { + SerializedLabel, SerializedTask, SerializedJobValidationLayout, + SerializedTaskValidationLayout, +} from './server-response-types'; import { checkInEnum, checkObjectType } from './common'; import { getCollection, getSaver, clearAnnotations, getAnnotations, @@ -37,7 +40,7 @@ import AnnotationGuide from './guide'; import requestsManager from './requests-manager'; import { Request } from './request'; import User from './user'; -import ValidationLayout from './validation-layout'; +import { JobValidationLayout, TaskValidationLayout } from './validation-layout'; // must be called with task/job context async function deleteFrameWrapper(jobID, frame): Promise { @@ -171,7 +174,7 @@ export function implementJob(Job: typeof JobClass): typeof JobClass { ): ReturnType { const result = await serverProxy.jobs.validationLayout(this.id); if (Object.keys(result).length) { - return new ValidationLayout(result as Required); + return new JobValidationLayout(result as SerializedJobValidationLayout); } return null; @@ -374,7 +377,7 @@ export function implementJob(Job: typeof JobClass): typeof JobClass { } if ('annotationsFilters' in searchParameters && 'generalFilters' in searchParameters) { - throw new ArgumentError('Both annotations filters and general fiters could not be used together'); + throw new ArgumentError('Both annotations filters and general filters could not be used together'); } if (!Number.isInteger(frameFrom) || !Number.isInteger(frameTo)) { @@ -516,6 +519,18 @@ export function implementJob(Job: typeof JobClass): typeof JobClass { }, }); + Object.defineProperty(Job.prototype.annotations.commit, 'implementation', { + value: function commitAnnotationsImplementation( + this: JobClass, + added: Parameters[0], + removed: Parameters[1], + frame: Parameters[2], + ): ReturnType { + getCollection(this).commit(added, removed, frame); + return Promise.resolve(); + }, + }); + Object.defineProperty(Job.prototype.annotations.upload, 'implementation', { value: async function uploadAnnotationsImplementation( this: JobClass, @@ -641,9 +656,9 @@ export function implementTask(Task: typeof TaskClass): typeof TaskClass { value: async function validationLayoutImplementation( this: TaskClass, ): ReturnType { - const result = await serverProxy.tasks.validationLayout(this.id); - if (Object.keys(result).length) { - return new ValidationLayout(result as Required); + const result = await serverProxy.tasks.validationLayout(this.id) as SerializedTaskValidationLayout; + if (result.mode !== null) { + return new TaskValidationLayout(result); } return null; @@ -753,12 +768,12 @@ export function implementTask(Task: typeof TaskClass): typeof TaskClass { const { taskID, rqID } = await serverProxy.tasks.create( taskSpec, taskDataSpec, - options?.requestStatusCallback || (() => {}), + 
options?.updateStatusCallback || (() => {}), ); await requestsManager.listen(rqID, { callback: (request: Request) => { - options?.requestStatusCallback(request); + options?.updateStatusCallback(request); if (request.status === RQStatus.FAILED) { serverProxy.tasks.delete(taskID, config.organization.organizationSlug || null); } @@ -876,6 +891,14 @@ export function implementTask(Task: typeof TaskClass): typeof TaskClass { }, }); + Object.defineProperty(Task.prototype.frames.frameNumbers, 'implementation', { + value: function includedFramesImplementation( + this: TaskClass, + ): ReturnType { + throw new Error('Not implemented for Task'); + }, + }); + Object.defineProperty(Task.prototype.frames.preview, 'implementation', { value: function previewImplementation( this: TaskClass, @@ -1043,7 +1066,7 @@ export function implementTask(Task: typeof TaskClass): typeof TaskClass { } if ('annotationsFilters' in searchParameters && 'generalFilters' in searchParameters) { - throw new ArgumentError('Both annotations filters and general fiters could not be used together'); + throw new ArgumentError('Both annotations filters and general filters could not be used together'); } if (!Number.isInteger(frameFrom) || !Number.isInteger(frameTo)) { @@ -1197,6 +1220,18 @@ export function implementTask(Task: typeof TaskClass): typeof TaskClass { }, }); + Object.defineProperty(Task.prototype.annotations.commit, 'implementation', { + value: function commitAnnotationsImplementation( + this: TaskClass, + added: Parameters[0], + removed: Parameters[1], + frame: Parameters[2], + ): ReturnType { + getCollection(this).commit(added, removed, frame); + return Promise.resolve(); + }, + }); + Object.defineProperty(Task.prototype.annotations.exportDataset, 'implementation', { value: async function exportDatasetImplementation( this: TaskClass, diff --git a/cvat-core/src/session.ts b/cvat-core/src/session.ts index cf82aa9a050c..b3269ee78076 100644 --- a/cvat-core/src/session.ts +++ b/cvat-core/src/session.ts @@ -28,7 +28,8 @@ import { Request } from './request'; import logger from './logger'; import Issue from './issue'; import ObjectState from './object-state'; -import ValidationLayout from './validation-layout'; +import { JobValidationLayout, TaskValidationLayout } from './validation-layout'; +import { UpdateStatusData } from './core-types'; function buildDuplicatedAPI(prototype) { Object.defineProperties(prototype, { @@ -171,6 +172,17 @@ function buildDuplicatedAPI(prototype) { return result; }, + async commit(added, removed, frame) { + const result = await PluginRegistry.apiWrapper.call( + this, + prototype.annotations.commit, + added, + removed, + frame, + ); + return result; + }, + async exportDataset( format: string, saveImages: boolean, @@ -331,7 +343,7 @@ export class Session { delTrackKeyframesOnly?: boolean; }) => Promise; save: ( - onUpdate ?: (message: string) => void, + onUpdate?: (message: string) => void, ) => Promise; search: ( frameFrom: number, @@ -360,6 +372,11 @@ export class Session { }>; import: (data: Omit) => Promise; export: () => Promise>; + commit: ( + added: Omit, + removed: Omit, + frame: number, + ) => Promise; statistics: () => Promise; hasUnsavedChanges: () => boolean; exportDataset: ( @@ -372,8 +389,8 @@ export class Session { }; public actions: { - undo: (count: number) => Promise; - redo: (count: number) => Promise; + undo: (count?: number) => Promise; + redo: (count?: number) => Promise; freeze: (frozen: boolean) => Promise; clear: () => Promise; get: () => Promise<{ undo: [HistoryActions, 
number][], redo: [HistoryActions, number][] }>; @@ -402,8 +419,8 @@ export class Session { public logger: { log: ( scope: Parameters[0], - payload: Parameters[1], - wait: Parameters[2], + payload?: Parameters[1], + wait?: Parameters[2], ) => ReturnType; }; @@ -430,6 +447,7 @@ export class Session { select: Object.getPrototypeOf(this).annotations.select.bind(this), import: Object.getPrototypeOf(this).annotations.import.bind(this), export: Object.getPrototypeOf(this).annotations.export.bind(this), + commit: Object.getPrototypeOf(this).annotations.commit.bind(this), statistics: Object.getPrototypeOf(this).annotations.statistics.bind(this), hasUnsavedChanges: Object.getPrototypeOf(this).annotations.hasUnsavedChanges.bind(this), exportDataset: Object.getPrototypeOf(this).annotations.exportDataset.bind(this), @@ -462,7 +480,7 @@ export class Session { } } -type InitializerType = Readonly & { labels?: SerializedLabel[] }>; +type InitializerType = Readonly & { labels?: SerializedLabel[] }>>; export class Job extends Session { #data: { @@ -686,7 +704,7 @@ export class Job extends Session { return result; } - async validationLayout(): Promise { + async validationLayout(): Promise { const result = await PluginRegistry.apiWrapper.call(this, Job.prototype.validationLayout); return result; } @@ -1141,7 +1159,7 @@ export class Task extends Session { async save( fields: Record = {}, - options?: { requestStatusCallback?: (request: Request) => void }, + options?: { updateStatusCallback?: (updateData: Request | UpdateStatusData) => void }, ): Promise { const result = await PluginRegistry.apiWrapper.call(this, Task.prototype.save, fields, options); return result; @@ -1186,7 +1204,7 @@ export class Task extends Session { return result; } - async validationLayout(): Promise { + async validationLayout(): Promise { const result = await PluginRegistry.apiWrapper.call(this, Task.prototype.validationLayout); return result; } diff --git a/cvat-core/src/user.ts b/cvat-core/src/user.ts index 1b0eb5ecfec9..ef28f3633f0e 100644 --- a/cvat-core/src/user.ts +++ b/cvat-core/src/user.ts @@ -1,5 +1,5 @@ // Copyright (C) 2019-2022 Intel Corporation -// Copyright (C) 2022 CVAT.ai Corporation +// Copyright (C) 2022-2024 CVAT.ai Corporation // // SPDX-License-Identifier: MIT @@ -11,13 +11,14 @@ export default class User { public readonly email: string; public readonly firstName: string; public readonly lastName: string; - public readonly groups: ('user' | 'business' | 'admin')[]; + public readonly groups: ('user' | 'admin')[]; public readonly lastLogin: string; public readonly dateJoined: string; public readonly isStaff: boolean; public readonly isSuperuser: boolean; public readonly isActive: boolean; public readonly isVerified: boolean; + public readonly hasAnalyticsAccess: boolean; constructor(initialData: SerializedUser) { const data = { @@ -33,6 +34,7 @@ export default class User { is_superuser: null, is_active: null, email_verification_required: null, + has_analytics_access: null, }; for (const property in data) { @@ -80,6 +82,9 @@ export default class User { isVerified: { get: () => !data.email_verification_required, }, + hasAnalyticsAccess: { + get: () => data.has_analytics_access, + }, }), ); } @@ -98,6 +103,7 @@ export default class User { is_superuser: this.isSuperuser, is_active: this.isActive, email_verification_required: this.isVerified, + has_analytics_access: this.hasAnalyticsAccess, }; } } diff --git a/cvat-core/src/validation-layout.ts b/cvat-core/src/validation-layout.ts index ba5a94aa03a9..064af13b2514 100644 
--- a/cvat-core/src/validation-layout.ts +++ b/cvat-core/src/validation-layout.ts @@ -2,37 +2,43 @@ // // SPDX-License-Identifier: MIT -import { SerializedValidationLayout } from 'server-response-types'; +import { SerializedJobValidationLayout, SerializedTaskValidationLayout } from 'server-response-types'; import PluginRegistry from './plugins'; -export default class ValidationLayout { - #honeypotFrames: number[]; - #honeypotRealFrames: number[]; +export class JobValidationLayout { + #honeypotCount: JobValidationLayout['honeypotCount']; + #honeypotFrames: JobValidationLayout['honeypotFrames']; + #honeypotRealFrames: JobValidationLayout['honeypotRealFrames']; - public constructor(data: Required) { - this.#honeypotFrames = [...data.honeypot_frames]; - this.#honeypotRealFrames = [...data.honeypot_real_frames]; + public constructor(data: SerializedJobValidationLayout) { + this.#honeypotCount = data.honeypot_count ?? 0; + this.#honeypotFrames = [...(data.honeypot_frames ?? [])]; + this.#honeypotRealFrames = [...(data.honeypot_real_frames ?? [])]; } - public get honeypotFrames() { + public get honeypotCount(): number { + return this.#honeypotCount; + } + + public get honeypotFrames(): number[] { return [...this.#honeypotFrames]; } - public get honeypotRealFrames() { + public get honeypotRealFrames(): number[] { return [...this.#honeypotRealFrames]; } async getRealFrame(frame: number): Promise { - const result = await PluginRegistry.apiWrapper.call(this, ValidationLayout.prototype.getRealFrame, frame); + const result = await PluginRegistry.apiWrapper.call(this, JobValidationLayout.prototype.getRealFrame, frame); return result; } } -Object.defineProperties(ValidationLayout.prototype.getRealFrame, { +Object.defineProperties(JobValidationLayout.prototype.getRealFrame, { implementation: { writable: false, enumerable: false, - value: function implementation(this: ValidationLayout, frame: number): number | null { + value: function implementation(this: JobValidationLayout, frame: number): number | null { const index = this.honeypotFrames.indexOf(frame); if (index !== -1) { return this.honeypotRealFrames[index]; @@ -42,3 +48,28 @@ Object.defineProperties(ValidationLayout.prototype.getRealFrame, { }, }, }); + +export class TaskValidationLayout extends JobValidationLayout { + #mode: TaskValidationLayout['mode']; + #validationFrames: TaskValidationLayout['validationFrames']; + #disabledFrames: TaskValidationLayout['disabledFrames']; + + public constructor(data: SerializedTaskValidationLayout) { + super(data); + this.#mode = data.mode; + this.#validationFrames = [...(data.validation_frames ?? [])]; + this.#disabledFrames = [...(data.disabled_frames ?? [])]; + } + + public get mode(): NonNullable { + return this.#mode; + } + + public get validationFrames(): number[] { + return [...this.#validationFrames]; + } + + public get disabledFrames(): number[] { + return [...this.#disabledFrames]; + } +} diff --git a/cvat-data/src/ts/3rdparty/README.md b/cvat-data/src/ts/3rdparty/README.md index 32ff0a20ab50..2bcd37af45b9 100644 --- a/cvat-data/src/ts/3rdparty/README.md +++ b/cvat-data/src/ts/3rdparty/README.md @@ -10,8 +10,8 @@ These files are from the [Broadway.js](https://github.com/mbebenita/Broadway) re Authors don't provide an npm package, so we need to store these components in our repository. We use this dependency to decode video chunks from a server and split them to frames on client side. -We need to run this package in node environent (for example for debug, or for running unit tests). 
-But there aren't any ways to do that (even with syntetic environment, provided for example by the package ``browser-env``). +We need to run this package in node environment (for example for debug, or for running unit tests). +But there aren't any ways to do that (even with synthetic environment, provided for example by the package ``browser-env``). For example there are issues with canvas using (webpack doesn't work with binary canvas package for node-js) and others. So, we have solved to write patch file for this library. It modifies source code a little to support our scenario of using. diff --git a/cvat-data/src/ts/unzip_imgs.worker.ts b/cvat-data/src/ts/unzip_imgs.worker.ts index 70d8299e7c38..4ca131a09955 100644 --- a/cvat-data/src/ts/unzip_imgs.worker.ts +++ b/cvat-data/src/ts/unzip_imgs.worker.ts @@ -34,7 +34,7 @@ onmessage = (e) => { .async('blob') .then((fileData) => { if (!errored) { - // do not need to read the rest of block if an error already occured + // do not need to read the rest of block if an error already occurred if (dimension === dimension2D) { createImageBitmap(fileData).then((img) => { postMessage({ diff --git a/cvat-sdk/README.md b/cvat-sdk/README.md index fa68c0e5d40d..89702c02abd4 100644 --- a/cvat-sdk/README.md +++ b/cvat-sdk/README.md @@ -20,7 +20,14 @@ To install a prebuilt package, run the following command in the terminal: pip install cvat-sdk ``` -To use the PyTorch adapter, request the `pytorch` extra: +To use the `cvat_sdk.masks` module, request the `masks` extra: + +```bash +pip install "cvat-sdk[masks]" +``` + +To use the PyTorch adapter or the built-in PyTorch-based auto-annotation functions, +request the `pytorch` extra: ```bash pip install "cvat-sdk[pytorch]" diff --git a/cvat-sdk/cvat_sdk/auto_annotation/__init__.py b/cvat-sdk/cvat_sdk/auto_annotation/__init__.py index e5dbdf9fcc42..adbb6007e125 100644 --- a/cvat-sdk/cvat_sdk/auto_annotation/__init__.py +++ b/cvat-sdk/cvat_sdk/auto_annotation/__init__.py @@ -10,8 +10,27 @@ keypoint, keypoint_spec, label_spec, + mask, + polygon, rectangle, shape, skeleton, skeleton_label_spec, ) + +__all__ = [ + "annotate_task", + "BadFunctionError", + "DetectionFunction", + "DetectionFunctionContext", + "DetectionFunctionSpec", + "keypoint_spec", + "keypoint", + "label_spec", + "mask", + "polygon", + "rectangle", + "shape", + "skeleton_label_spec", + "skeleton", +] diff --git a/cvat-sdk/cvat_sdk/auto_annotation/driver.py b/cvat-sdk/cvat_sdk/auto_annotation/driver.py index d6294f44f8f6..5ffdb36f5bee 100644 --- a/cvat-sdk/cvat_sdk/auto_annotation/driver.py +++ b/cvat-sdk/cvat_sdk/auto_annotation/driver.py @@ -3,7 +3,8 @@ # SPDX-License-Identifier: MIT import logging -from typing import List, Mapping, Optional, Sequence +from collections.abc import Mapping, Sequence +from typing import Optional import attrs @@ -98,9 +99,11 @@ def __init__( ds_labels: Sequence[models.ILabel], *, allow_unmatched_labels: bool, + conv_mask_to_poly: bool, ) -> None: self._logger = logger self._allow_unmatched_labels = allow_unmatched_labels + self._conv_mask_to_poly = conv_mask_to_poly ds_labels_by_name = {ds_label.name: ds_label for ds_label in ds_labels} @@ -119,7 +122,7 @@ def __init__( fun_label, ds_labels_by_name ) - def validate_and_remap(self, shapes: List[models.LabeledShapeRequest], ds_frame: int) -> None: + def validate_and_remap(self, shapes: list[models.LabeledShapeRequest], ds_frame: int) -> None: new_shapes = [] for shape in shapes: @@ -216,12 +219,19 @@ def validate_and_remap(self, shapes: List[models.LabeledShapeRequest], 
ds_frame: if getattr(shape, "elements", None): raise BadFunctionError("function output non-skeleton shape with elements") + if shape.type.value == "mask" and self._conv_mask_to_poly: + raise BadFunctionError( + "function output mask shape despite conv_mask_to_poly=True" + ) + shapes[:] = new_shapes -@attrs.frozen +@attrs.frozen(kw_only=True) class _DetectionFunctionContextImpl(DetectionFunctionContext): frame_name: str + conf_threshold: Optional[float] = None + conv_mask_to_poly: bool = False def annotate_task( @@ -232,6 +242,8 @@ def annotate_task( pbar: Optional[ProgressReporter] = None, clear_existing: bool = False, allow_unmatched_labels: bool = False, + conf_threshold: Optional[float] = None, + conv_mask_to_poly: bool = False, ) -> None: """ Downloads data for the task with the given ID, applies the given function to it @@ -263,11 +275,21 @@ def annotate_task( function declares a label in its spec that has no corresponding label in the task. If it's set to true, then such labels are allowed, and any annotations returned by the function that refer to this label are ignored. Otherwise, BadFunctionError is raised. + + The conf_threshold parameter must be None or a number between 0 and 1. It will be passed + to the AA function as the conf_threshold attribute of the context object. + + The conv_mask_to_poly parameter will be passed to the AA function as the conv_mask_to_poly + attribute of the context object. If it's true, and the AA function returns any mask shapes, + BadFunctionError will be raised. """ if pbar is None: pbar = NullProgressReporter() + if conf_threshold is not None and not 0 <= conf_threshold <= 1: + raise ValueError("conf_threshold must be None or a number between 0 and 1") + dataset = TaskDataset(client, task_id, load_annotations=False) assert isinstance(function.spec, DetectionFunctionSpec) @@ -277,6 +299,7 @@ def annotate_task( function.spec.labels, dataset.labels, allow_unmatched_labels=allow_unmatched_labels, + conv_mask_to_poly=conv_mask_to_poly, ) shapes = [] @@ -284,12 +307,17 @@ def annotate_task( with pbar.task(total=len(dataset.samples), unit="samples"): for sample in pbar.iter(dataset.samples): frame_shapes = function.detect( - _DetectionFunctionContextImpl(sample.frame_name), sample.media.load_image() + _DetectionFunctionContextImpl( + frame_name=sample.frame_name, + conf_threshold=conf_threshold, + conv_mask_to_poly=conv_mask_to_poly, + ), + sample.media.load_image(), ) mapper.validate_and_remap(frame_shapes, sample.frame_index) shapes.extend(frame_shapes) - client.logger.info("Uploading annotations to task %d", task_id) + client.logger.info("Uploading annotations to task %d...", task_id) if clear_existing: client.tasks.api.update_annotations( @@ -301,3 +329,5 @@ def annotate_task( task_id, patched_labeled_data_request=models.PatchedLabeledDataRequest(shapes=shapes), ) + + client.logger.info("Upload complete") diff --git a/cvat-sdk/cvat_sdk/auto_annotation/functions/_torchvision.py b/cvat-sdk/cvat_sdk/auto_annotation/functions/_torchvision.py new file mode 100644 index 000000000000..9fa88e0a7c07 --- /dev/null +++ b/cvat-sdk/cvat_sdk/auto_annotation/functions/_torchvision.py @@ -0,0 +1,26 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +from functools import cached_property + +import torchvision.models + +import cvat_sdk.auto_annotation as cvataa + + +class TorchvisionFunction: + def __init__(self, model_name: str, weights_name: str = "DEFAULT", **kwargs) -> None: + weights_enum = 
torchvision.models.get_model_weights(model_name) + self._weights = weights_enum[weights_name] + self._transforms = self._weights.transforms() + self._model = torchvision.models.get_model(model_name, weights=self._weights, **kwargs) + self._model.eval() + + @cached_property + def spec(self) -> cvataa.DetectionFunctionSpec: + return cvataa.DetectionFunctionSpec( + labels=[ + cvataa.label_spec(cat, i) for i, cat in enumerate(self._weights.meta["categories"]) + ] + ) diff --git a/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_detection.py b/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_detection.py index 57457d742256..b16e4d8874ae 100644 --- a/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_detection.py +++ b/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_detection.py @@ -2,39 +2,26 @@ # # SPDX-License-Identifier: MIT -from functools import cached_property -from typing import List - import PIL.Image -import torchvision.models import cvat_sdk.auto_annotation as cvataa import cvat_sdk.models as models +from ._torchvision import TorchvisionFunction -class _TorchvisionDetectionFunction: - def __init__(self, model_name: str, weights_name: str = "DEFAULT", **kwargs) -> None: - weights_enum = torchvision.models.get_model_weights(model_name) - self._weights = weights_enum[weights_name] - self._transforms = self._weights.transforms() - self._model = torchvision.models.get_model(model_name, weights=self._weights, **kwargs) - self._model.eval() - - @cached_property - def spec(self) -> cvataa.DetectionFunctionSpec: - return cvataa.DetectionFunctionSpec( - labels=[ - cvataa.label_spec(cat, i) for i, cat in enumerate(self._weights.meta["categories"]) - ] - ) - def detect(self, context, image: PIL.Image.Image) -> List[models.LabeledShapeRequest]: +class _TorchvisionDetectionFunction(TorchvisionFunction): + def detect( + self, context: cvataa.DetectionFunctionContext, image: PIL.Image.Image + ) -> list[models.LabeledShapeRequest]: + conf_threshold = context.conf_threshold or 0 results = self._model([self._transforms(image)]) return [ cvataa.rectangle(label.item(), [x.item() for x in box]) for result in results - for box, label in zip(result["boxes"], result["labels"]) + for box, label, score in zip(result["boxes"], result["labels"], result["scores"]) + if score >= conf_threshold ] diff --git a/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_instance_segmentation.py b/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_instance_segmentation.py new file mode 100644 index 000000000000..6aa891811f5b --- /dev/null +++ b/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_instance_segmentation.py @@ -0,0 +1,70 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import math +from collections.abc import Iterator + +import numpy as np +import PIL.Image +from skimage import measure +from torch import Tensor + +import cvat_sdk.auto_annotation as cvataa +import cvat_sdk.models as models +from cvat_sdk.masks import encode_mask + +from ._torchvision import TorchvisionFunction + + +def _is_positively_oriented(contour: np.ndarray) -> bool: + ys, xs = contour.T + + # This is the shoelace formula, except we only need the sign of the result, + # so we compare instead of subtracting. Compared to the typical formula, + # the sign is inverted, because the Y axis points downwards. 
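+    # Concretely, with contour vertices (x_i, y_i) and indices taken mod n,
+    # the shoelace signed area is A = (1/2) * sum_i (x_i*y_{i+1} - x_{i+1}*y_i),
+    # and A > 0 means counter-clockwise when Y points up. The comparison
+    # below checks sum_i x_i*y_{i+1} < sum_i x_{i+1}*y_i, i.e. A < 0 -- which,
+    # with Y pointing down, is exactly a counter-clockwise contour on screen.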
+ return np.sum(xs * np.roll(ys, -1)) < np.sum(ys * np.roll(xs, -1)) + + +def _generate_shapes( + context: cvataa.DetectionFunctionContext, box: Tensor, mask: Tensor, label: Tensor +) -> Iterator[models.LabeledShapeRequest]: + LEVEL = 0.5 + + if context.conv_mask_to_poly: + # Since we treat mask values of exactly LEVEL as true, we'd like them + # to also be considered high by find_contours. And for that, the level + # parameter must be slightly less than LEVEL. + contours = measure.find_contours(mask[0].detach().numpy(), level=math.nextafter(LEVEL, 0)) + + for contour in contours: + if len(contour) < 3 or _is_positively_oriented(contour): + continue + + contour = measure.approximate_polygon(contour, tolerance=2.5) + + yield cvataa.polygon(label.item(), contour[:, ::-1].ravel().tolist()) + + else: + yield cvataa.mask(label.item(), encode_mask(mask[0] >= LEVEL, box.tolist())) + + +class _TorchvisionInstanceSegmentationFunction(TorchvisionFunction): + def detect( + self, context: cvataa.DetectionFunctionContext, image: PIL.Image.Image + ) -> list[models.LabeledShapeRequest]: + conf_threshold = context.conf_threshold or 0 + results = self._model([self._transforms(image)]) + + return [ + shape + for result in results + for box, mask, label, score in zip( + result["boxes"], result["masks"], result["labels"], result["scores"] + ) + if score >= conf_threshold + for shape in _generate_shapes(context, box, mask, label) + ] + + +create = _TorchvisionInstanceSegmentationFunction diff --git a/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_keypoint_detection.py b/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_keypoint_detection.py index b4eb47d476d3..4d2250d61c35 100644 --- a/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_keypoint_detection.py +++ b/cvat-sdk/cvat_sdk/auto_annotation/functions/torchvision_keypoint_detection.py @@ -3,23 +3,16 @@ # SPDX-License-Identifier: MIT from functools import cached_property -from typing import List import PIL.Image -import torchvision.models import cvat_sdk.auto_annotation as cvataa import cvat_sdk.models as models +from ._torchvision import TorchvisionFunction -class _TorchvisionKeypointDetectionFunction: - def __init__(self, model_name: str, weights_name: str = "DEFAULT", **kwargs) -> None: - weights_enum = torchvision.models.get_model_weights(model_name) - self._weights = weights_enum[weights_name] - self._transforms = self._weights.transforms() - self._model = torchvision.models.get_model(model_name, weights=self._weights, **kwargs) - self._model.eval() +class _TorchvisionKeypointDetectionFunction(TorchvisionFunction): @cached_property def spec(self) -> cvataa.DetectionFunctionSpec: return cvataa.DetectionFunctionSpec( @@ -36,7 +29,10 @@ def spec(self) -> cvataa.DetectionFunctionSpec: ] ) - def detect(self, context, image: PIL.Image.Image) -> List[models.LabeledShapeRequest]: + def detect( + self, context: cvataa.DetectionFunctionContext, image: PIL.Image.Image + ) -> list[models.LabeledShapeRequest]: + conf_threshold = context.conf_threshold or 0 results = self._model([self._transforms(image)]) return [ @@ -52,7 +48,10 @@ def detect(self, context, image: PIL.Image.Image) -> List[models.LabeledShapeReq ], ) for result in results - for keypoints, label in zip(result["keypoints"], result["labels"]) + for keypoints, label, score in zip( + result["keypoints"], result["labels"], result["scores"] + ) + if score >= conf_threshold ] diff --git a/cvat-sdk/cvat_sdk/auto_annotation/interface.py b/cvat-sdk/cvat_sdk/auto_annotation/interface.py 
index 67313a7da6e5..f95cb50b4f2d 100644 --- a/cvat-sdk/cvat_sdk/auto_annotation/interface.py +++ b/cvat-sdk/cvat_sdk/auto_annotation/interface.py @@ -3,7 +3,8 @@ # SPDX-License-Identifier: MIT import abc -from typing import List, Protocol, Sequence +from collections.abc import Sequence +from typing import Optional, Protocol import attrs import PIL.Image @@ -49,7 +50,33 @@ def frame_name(self) -> str: The file name of the frame that the current image corresponds to in the dataset. """ - ... + + @property + @abc.abstractmethod + def conf_threshold(self) -> Optional[float]: + """ + The confidence threshold that the function should use for filtering + detections. + + If the function is able to estimate confidence levels, then: + + * If this value is None, the function may apply a default threshold at its discretion. + + * Otherwise, it will be a number between 0 and 1. The function must only return + objects with confidence levels greater than or equal to this value. + + If the function is not able to estimate confidence levels, it can ignore this value. + """ + + @property + @abc.abstractmethod + def conv_mask_to_poly(self) -> bool: + """ + If this is true, the function must convert any mask shapes to polygon shapes + before returning them. + + If the function does not return any mask shapes, then it can ignore this value. + """ class DetectionFunction(Protocol): @@ -79,7 +106,7 @@ def spec(self) -> DetectionFunctionSpec: def detect( self, context: DetectionFunctionContext, image: PIL.Image.Image - ) -> List[models.LabeledShapeRequest]: + ) -> list[models.LabeledShapeRequest]: """ Detects objects on the supplied image and returns the results. @@ -151,6 +178,21 @@ def rectangle(label_id: int, points: Sequence[float], **kwargs) -> models.Labele return shape(label_id, type="rectangle", points=points, **kwargs) +def polygon(label_id: int, points: Sequence[float], **kwargs) -> models.LabeledShapeRequest: + """Helper factory function for LabeledShapeRequest with frame=0 and type="polygon".""" + return shape(label_id, type="polygon", points=points, **kwargs) + + +def mask(label_id: int, points: Sequence[float], **kwargs) -> models.LabeledShapeRequest: + """ + Helper factory function for LabeledShapeRequest with frame=0 and type="mask". + + It's recommended to use the cvat.masks.encode_mask function to build the + points argument. 
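+
+    A sketch of typical usage, where ``bitmap`` and ``bbox`` stand for a 2D
+    boolean array and its [x1, y1, x2, y2] bounding box (hypothetical names;
+    ``encode_mask`` is used the same way in the torchvision instance
+    segmentation function above)::
+
+        from cvat_sdk.masks import encode_mask
+
+        shape = mask(label_id, encode_mask(bitmap, bbox))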
+ """ + return shape(label_id, type="mask", points=points, **kwargs) + + def skeleton( label_id: int, elements: Sequence[models.SubLabeledShapeRequest], **kwargs ) -> models.LabeledShapeRequest: diff --git a/cvat-sdk/cvat_sdk/core/client.py b/cvat-sdk/cvat_sdk/core/client.py index add7ccb5f3d3..168259920c0f 100644 --- a/cvat-sdk/cvat_sdk/core/client.py +++ b/cvat-sdk/cvat_sdk/core/client.py @@ -7,10 +7,11 @@ import logging import urllib.parse +from collections.abc import Generator, Sequence from contextlib import contextmanager, suppress from pathlib import Path from time import sleep -from typing import Any, Dict, Iterator, Optional, Sequence, Tuple, TypeVar +from typing import Any, Optional, TypeVar import attrs import packaging.specifiers as specifiers @@ -95,7 +96,7 @@ def __init__( if check_server_version: self.check_server_version() - self._repos: Dict[str, Repo] = {} + self._repos: dict[str, Repo] = {} """A cache for created Repository instances""" _ORG_SLUG_HEADER = "X-Organization" @@ -121,7 +122,7 @@ def organization_slug(self, org_slug: Optional[str]): self.api_client.default_headers[self._ORG_SLUG_HEADER] = org_slug @contextmanager - def organization_context(self, slug: str) -> Iterator[None]: + def organization_context(self, slug: str) -> Generator[None, None, None]: prev_slug = self.organization_slug self.organization_slug = slug try: @@ -183,7 +184,7 @@ def __exit__(self, exc_type, exc_value, traceback) -> None: def close(self) -> None: return self.__exit__(None, None, None) - def login(self, credentials: Tuple[str, str]) -> None: + def login(self, credentials: tuple[str, str]) -> None: (auth, _) = self.api_client.auth_api.create_login( models.LoginSerializerExRequest(username=credentials[0], password=credentials[1]) ) @@ -211,7 +212,7 @@ def wait_for_completion( rq_id: str, *, status_check_period: Optional[int] = None, - ) -> Tuple[models.Request, urllib3.HTTPResponse]: + ) -> tuple[models.Request, urllib3.HTTPResponse]: if status_check_period is None: status_check_period = self.config.status_check_period @@ -319,8 +320,8 @@ def make_endpoint_url( path: str, *, psub: Optional[Sequence[Any]] = None, - kwsub: Optional[Dict[str, Any]] = None, - query_params: Optional[Dict[str, Any]] = None, + kwsub: Optional[dict[str, Any]] = None, + query_params: Optional[dict[str, Any]] = None, ) -> str: url = self.host + path if psub or kwsub: @@ -331,7 +332,7 @@ def make_endpoint_url( def make_client( - host: str, *, port: Optional[int] = None, credentials: Optional[Tuple[str, str]] = None + host: str, *, port: Optional[int] = None, credentials: Optional[tuple[str, str]] = None ) -> Client: url = host.rstrip("/") if port: diff --git a/cvat-sdk/cvat_sdk/core/downloading.py b/cvat-sdk/cvat_sdk/core/downloading.py index c2be936c9aa4..d44535b2fc82 100644 --- a/cvat-sdk/cvat_sdk/core/downloading.py +++ b/cvat-sdk/cvat_sdk/core/downloading.py @@ -8,7 +8,7 @@ import json from contextlib import closing from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, Optional +from typing import TYPE_CHECKING, Any, Optional from cvat_sdk.api_client.api_client import Endpoint from cvat_sdk.core.helpers import expect_status @@ -58,8 +58,15 @@ def download_file( except ValueError: file_size = None - with atomic_writer(output_path, "wb") as fd, pbar.task( - total=file_size, desc="Downloading", unit_scale=True, unit="B", unit_divisor=1024 + with ( + atomic_writer(output_path, "wb") as fd, + pbar.task( + total=file_size, + desc="Downloading", + unit_scale=True, + unit="B", + unit_divisor=1024, + ), 
): while True: chunk = response.read(amt=CHUNK_SIZE, decode_content=False) @@ -73,8 +80,8 @@ def prepare_file( self, endpoint: Endpoint, *, - url_params: Optional[Dict[str, Any]] = None, - query_params: Optional[Dict[str, Any]] = None, + url_params: Optional[dict[str, Any]] = None, + query_params: Optional[dict[str, Any]] = None, status_check_period: Optional[int] = None, ): client = self._client @@ -111,8 +118,8 @@ def prepare_and_download_file_from_endpoint( endpoint: Endpoint, filename: Path, *, - url_params: Optional[Dict[str, Any]] = None, - query_params: Optional[Dict[str, Any]] = None, + url_params: Optional[dict[str, Any]] = None, + query_params: Optional[dict[str, Any]] = None, pbar: Optional[ProgressReporter] = None, status_check_period: Optional[int] = None, ): diff --git a/cvat-sdk/cvat_sdk/core/helpers.py b/cvat-sdk/cvat_sdk/core/helpers.py index b04e33e4c687..425fbc78a083 100644 --- a/cvat-sdk/cvat_sdk/core/helpers.py +++ b/cvat-sdk/cvat_sdk/core/helpers.py @@ -7,7 +7,8 @@ import io import json import warnings -from typing import Any, Dict, Iterable, List, Optional, Union +from collections.abc import Iterable +from typing import Any, Optional, Union import tqdm import urllib3 @@ -19,7 +20,7 @@ def get_paginated_collection( endpoint: Endpoint, *, return_json: bool = False, **kwargs -) -> Union[List, List[Dict[str, Any]]]: +) -> Union[list, list[dict[str, Any]]]: """ Accumulates results from all the pages """ diff --git a/cvat-sdk/cvat_sdk/core/progress.py b/cvat-sdk/cvat_sdk/core/progress.py index 7fd2d13a2cd2..33c7e420714e 100644 --- a/cvat-sdk/cvat_sdk/core/progress.py +++ b/cvat-sdk/cvat_sdk/core/progress.py @@ -6,7 +6,8 @@ from __future__ import annotations import contextlib -from typing import ContextManager, Iterable, Optional, TypeVar +from collections.abc import Generator, Iterable +from typing import Optional, TypeVar T = TypeVar("T") @@ -26,7 +27,7 @@ class ProgressReporter: """ @contextlib.contextmanager - def task(self, **kwargs) -> ContextManager[None]: + def task(self, **kwargs) -> Generator[None, None, None]: """ Returns a context manager that represents a long-running task for which progress can be reported. 
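A note on the ``task`` annotation just above: a function decorated with ``contextlib.contextmanager`` is itself a generator function, so ``Generator[None, None, None]`` describes what the undecorated function returns, whereas ``ContextManager[None]`` described only the decorated result. A minimal standalone sketch of the pattern (not CVAT API):

    import contextlib
    from collections.abc import Generator

    @contextlib.contextmanager
    def step(name: str) -> Generator[None, None, None]:
        print(f"start {name}")       # setup runs on __enter__
        try:
            yield                    # body of the with-statement runs here
        finally:
            print(f"finish {name}")  # teardown runs on __exit__

    with step("download"):
        pass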
diff --git a/cvat-sdk/cvat_sdk/core/proxies/annotations.py b/cvat-sdk/cvat_sdk/core/proxies/annotations.py index e9353888119f..53db2af34712 100644 --- a/cvat-sdk/cvat_sdk/core/proxies/annotations.py +++ b/cvat-sdk/cvat_sdk/core/proxies/annotations.py @@ -3,8 +3,9 @@ # SPDX-License-Identifier: MIT from abc import ABC +from collections.abc import Sequence from enum import Enum -from typing import Optional, Sequence +from typing import Optional from cvat_sdk import models from cvat_sdk.core.proxies.model_proxy import _EntityT diff --git a/cvat-sdk/cvat_sdk/core/proxies/issues.py b/cvat-sdk/cvat_sdk/core/proxies/issues.py index 5df1069c1178..8f844d68522a 100644 --- a/cvat-sdk/cvat_sdk/core/proxies/issues.py +++ b/cvat-sdk/cvat_sdk/core/proxies/issues.py @@ -4,8 +4,6 @@ from __future__ import annotations -from typing import List - from cvat_sdk.api_client import apis, models from cvat_sdk.core.helpers import get_paginated_collection from cvat_sdk.core.proxies.model_proxy import ( @@ -53,7 +51,7 @@ class Issue( ): _model_partial_update_arg = "patched_issue_write_request" - def get_comments(self) -> List[Comment]: + def get_comments(self) -> list[Comment]: return [ Comment(self._client, m) for m in get_paginated_collection( diff --git a/cvat-sdk/cvat_sdk/core/proxies/jobs.py b/cvat-sdk/cvat_sdk/core/proxies/jobs.py index ac81380b7566..4dde676179b7 100644 --- a/cvat-sdk/cvat_sdk/core/proxies/jobs.py +++ b/cvat-sdk/cvat_sdk/core/proxies/jobs.py @@ -6,8 +6,9 @@ import io import mimetypes +from collections.abc import Sequence from pathlib import Path -from typing import TYPE_CHECKING, List, Optional, Sequence +from typing import TYPE_CHECKING, Optional from PIL import Image @@ -52,7 +53,7 @@ def import_annotations( pbar: Optional[ProgressReporter] = None, ): """ - Upload annotations for a job in the specified format (e.g. 'YOLO ZIP 1.0'). + Upload annotations for a job in the specified format (e.g. 'YOLO 1.1'). 
""" filename = Path(filename) @@ -93,7 +94,7 @@ def download_frames( outdir: StrPath = ".", quality: str = "original", filename_pattern: str = "frame_{frame_id:06d}{frame_ext}", - ) -> Optional[List[Image.Image]]: + ) -> Optional[list[Image.Image]]: """ Download the requested frame numbers for a job and save images as outdir/filename_pattern """ @@ -125,12 +126,12 @@ def get_meta(self) -> models.IDataMetaRead: (meta, _) = self.api.retrieve_data_meta(self.id) return meta - def get_labels(self) -> List[models.ILabel]: + def get_labels(self) -> list[models.ILabel]: return get_paginated_collection( self._client.api_client.labels_api.list_endpoint, job_id=self.id ) - def get_frames_info(self) -> List[models.IFrameMeta]: + def get_frames_info(self) -> list[models.IFrameMeta]: return self.get_meta().frames def remove_frames_by_ids(self, ids: Sequence[int]) -> None: @@ -141,7 +142,7 @@ def remove_frames_by_ids(self, ids: Sequence[int]) -> None: ), ) - def get_issues(self) -> List[Issue]: + def get_issues(self) -> list[Issue]: return [ Issue(self._client, m) for m in get_paginated_collection( diff --git a/cvat-sdk/cvat_sdk/core/proxies/model_proxy.py b/cvat-sdk/cvat_sdk/core/proxies/model_proxy.py index 40b6ffd27549..124d7c2beb3d 100644 --- a/cvat-sdk/cvat_sdk/core/proxies/model_proxy.py +++ b/cvat-sdk/cvat_sdk/core/proxies/model_proxy.py @@ -6,19 +6,16 @@ import json from abc import ABC +from collections.abc import Sequence from copy import deepcopy from pathlib import Path from typing import ( TYPE_CHECKING, Any, Callable, - Dict, Generic, - List, Literal, Optional, - Tuple, - Type, TypeVar, Union, overload, @@ -96,15 +93,15 @@ class Repo(ModelProxy[ModelType, ApiType]): Implements group and management operations for entities. """ - _entity_type: Type[Entity[ModelType, ApiType]] + _entity_type: type[Entity[ModelType, ApiType]] ### Utilities def build_model_bases( - mt: Type[ModelType], at: Type[ApiType], *, api_member_name: Optional[str] = None -) -> Tuple[Type[Entity[ModelType, ApiType]], Type[Repo[ModelType, ApiType]]]: + mt: type[ModelType], at: type[ApiType], *, api_member_name: Optional[str] = None +) -> tuple[type[Entity[ModelType, ApiType]], type[Repo[ModelType, ApiType]]]: """ Helps to remove code duplication in declarations of derived classes """ @@ -128,7 +125,7 @@ class _RepoBase(Repo[ModelType, ApiType]): class ModelCreateMixin(Generic[_EntityT, IModel]): - def create(self: Repo, spec: Union[Dict[str, Any], IModel]) -> _EntityT: + def create(self: Repo, spec: Union[dict[str, Any], IModel]) -> _EntityT: """ Creates a new object on the server and returns the corresponding local object """ @@ -149,12 +146,12 @@ def retrieve(self: Repo, obj_id: int) -> _EntityT: class ModelListMixin(Generic[_EntityT]): @overload - def list(self: Repo, *, return_json: Literal[False] = False) -> List[_EntityT]: ... + def list(self: Repo, *, return_json: Literal[False] = False) -> list[_EntityT]: ... @overload - def list(self: Repo, *, return_json: Literal[True] = False) -> List[Any]: ... + def list(self: Repo, *, return_json: Literal[True] = False) -> list[Any]: ... - def list(self: Repo, *, return_json: bool = False) -> List[Union[_EntityT, Any]]: + def list(self: Repo, *, return_json: bool = False) -> list[Union[_EntityT, Any]]: """ Retrieves all objects from the server and returns them in basic or JSON format. 
""" @@ -166,6 +163,27 @@ def list(self: Repo, *, return_json: bool = False) -> List[Union[_EntityT, Any]] return [self._entity_type(self._client, model) for model in results] +class ModelBatchDeleteMixin(Repo): + def remove_by_ids(self, ids: Sequence[int], /) -> None: + """ + Delete a list of objects from the server, ignoring those which don't exist. + """ + type_name = self._entity_type.__name__ + + for object_id in ids: + (_, response) = self.api.destroy(object_id, _check_status=False) + + if 200 <= response.status <= 299: + self._client.logger.info(f"{type_name} #{object_id} deleted") + elif response.status == 404: + self._client.logger.info(f"{type_name} #{object_id} not found") + else: + self._client.logger.error( + f"Failed to delete {type_name} #{object_id}: " + f"{response.msg} (status {response.status})" + ) + + #### Entity mixins @@ -174,8 +192,8 @@ class ModelUpdateMixin(ABC, Generic[IModel]): def _model_partial_update_arg(self: Entity) -> str: ... def _export_update_fields( - self: Entity, overrides: Optional[Union[Dict[str, Any], IModel]] = None - ) -> Dict[str, Any]: + self: Entity, overrides: Optional[Union[dict[str, Any], IModel]] = None + ) -> dict[str, Any]: # TODO: support field conversion and assignment updating # fields = to_json(self._model) @@ -194,7 +212,7 @@ def fetch(self: Entity) -> Self: (self._model, _) = self.api.retrieve(id=getattr(self, self._model_id_field)) return self - def update(self: Entity, values: Union[Dict[str, Any], IModel]) -> Self: + def update(self: Entity, values: Union[dict[str, Any], IModel]) -> Self: """ Commits model changes to the server @@ -304,7 +322,7 @@ def export_dataset( cloud_storage_id: Optional[int] = None, ) -> None: """ - Export a dataset in the specified format (e.g. 'YOLO ZIP 1.0'). + Export a dataset in the specified format (e.g. 'YOLO 1.1'). By default, a result file will be downloaded based on the default configuration. To force file downloading, pass `location=Location.LOCAL`. To save a file to a specific cloud storage, use the `location` and `cloud_storage_id` arguments. diff --git a/cvat-sdk/cvat_sdk/core/proxies/projects.py b/cvat-sdk/cvat_sdk/core/proxies/projects.py index 70c647bfd033..0ea7063eaf72 100644 --- a/cvat-sdk/cvat_sdk/core/proxies/projects.py +++ b/cvat-sdk/cvat_sdk/core/proxies/projects.py @@ -7,7 +7,7 @@ import io import json from pathlib import Path -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, Optional from cvat_sdk.api_client import apis, models from cvat_sdk.core.helpers import get_paginated_collection @@ -15,6 +15,7 @@ from cvat_sdk.core.proxies.model_proxy import ( DownloadBackupMixin, ExportDatasetMixin, + ModelBatchDeleteMixin, ModelCreateMixin, ModelDeleteMixin, ModelListMixin, @@ -52,7 +53,7 @@ def import_dataset( pbar: Optional[ProgressReporter] = None, ): """ - Import dataset for a project in the specified format (e.g. 'YOLO ZIP 1.0'). + Import dataset for a project in the specified format (e.g. 'YOLO 1.1'). 
""" filename = Path(filename) @@ -72,7 +73,7 @@ def get_annotations(self) -> models.ILabeledData: (annotations, _) = self.api.retrieve_annotations(self.id) return annotations - def get_tasks(self) -> List[Task]: + def get_tasks(self) -> list[Task]: return [ Task(self._client, m) for m in get_paginated_collection( @@ -80,7 +81,7 @@ def get_tasks(self) -> List[Task]: ) ] - def get_labels(self) -> List[models.ILabel]: + def get_labels(self) -> list[models.ILabel]: return get_paginated_collection( self._client.api_client.labels_api.list_endpoint, project_id=self.id ) @@ -97,6 +98,7 @@ class ProjectsRepo( ModelCreateMixin[Project, models.IProjectWriteRequest], ModelListMixin[Project], ModelRetrieveMixin[Project], + ModelBatchDeleteMixin, ): _entity_type = Project diff --git a/cvat-sdk/cvat_sdk/core/proxies/tasks.py b/cvat-sdk/cvat_sdk/core/proxies/tasks.py index fe2d80d857b0..23d8f1c84962 100644 --- a/cvat-sdk/cvat_sdk/core/proxies/tasks.py +++ b/cvat-sdk/cvat_sdk/core/proxies/tasks.py @@ -8,10 +8,11 @@ import json import mimetypes import shutil +from collections.abc import Sequence from enum import Enum from pathlib import Path from time import sleep -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence +from typing import TYPE_CHECKING, Any, Optional from PIL import Image @@ -23,6 +24,7 @@ from cvat_sdk.core.proxies.model_proxy import ( DownloadBackupMixin, ExportDatasetMixin, + ModelBatchDeleteMixin, ModelCreateMixin, ModelDeleteMixin, ModelListMixin, @@ -72,7 +74,7 @@ def upload_data( *, resource_type: ResourceType = ResourceType.LOCAL, pbar: Optional[ProgressReporter] = None, - params: Optional[Dict[str, Any]] = None, + params: Optional[dict[str, Any]] = None, wait_for_completion: bool = True, status_check_period: Optional[int] = None, ) -> None: @@ -167,7 +169,7 @@ def import_annotations( pbar: Optional[ProgressReporter] = None, ): """ - Upload annotations for a task in the specified format (e.g. 'YOLO ZIP 1.0'). + Upload annotations for a task in the specified format (e.g. 'YOLO 1.1'). 
""" filename = Path(filename) @@ -226,7 +228,7 @@ def download_frames( outdir: StrPath = ".", quality: str = "original", filename_pattern: str = "frame_{frame_id:06d}{frame_ext}", - ) -> Optional[List[Image.Image]]: + ) -> Optional[list[Image.Image]]: """ Download the requested frame numbers for a task and save images as outdir/filename_pattern """ @@ -253,7 +255,7 @@ def download_frames( outfile = filename_pattern.format(frame_id=frame_id, frame_ext=im_ext) im.save(outdir / outfile) - def get_jobs(self) -> List[Job]: + def get_jobs(self) -> list[Job]: return [ Job(self._client, model=m) for m in get_paginated_collection( @@ -265,12 +267,12 @@ def get_meta(self) -> models.IDataMetaRead: (meta, _) = self.api.retrieve_data_meta(self.id) return meta - def get_labels(self) -> List[models.ILabel]: + def get_labels(self) -> list[models.ILabel]: return get_paginated_collection( self._client.api_client.labels_api.list_endpoint, task_id=self.id ) - def get_frames_info(self) -> List[models.IFrameMeta]: + def get_frames_info(self) -> list[models.IFrameMeta]: return self.get_meta().frames def remove_frames_by_ids(self, ids: Sequence[int]) -> None: @@ -285,7 +287,7 @@ class TasksRepo( ModelCreateMixin[Task, models.ITaskWriteRequest], ModelRetrieveMixin[Task], ModelListMixin[Task], - ModelDeleteMixin, + ModelBatchDeleteMixin, ): _entity_type = Task @@ -295,7 +297,7 @@ def create_from_data( resources: Sequence[StrPath], *, resource_type: ResourceType = ResourceType.LOCAL, - data_params: Optional[Dict[str, Any]] = None, + data_params: Optional[dict[str, Any]] = None, annotation_path: str = "", annotation_format: str = "CVAT XML 1.1", status_check_period: int = None, @@ -333,23 +335,16 @@ def create_from_data( return task + # This is a backwards compatibility wrapper to support calls which pass + # the task_ids parameter by keyword (the base class implementation is generic, + # so it doesn't support this). + # pylint: disable-next=arguments-differ def remove_by_ids(self, task_ids: Sequence[int]) -> None: """ Delete a list of tasks, ignoring those which don't exist. 
""" - for task_id in task_ids: - (_, response) = self.api.destroy(task_id, _check_status=False) - - if 200 <= response.status <= 299: - self._client.logger.info(f"Task ID {task_id} deleted") - elif response.status == 404: - self._client.logger.info(f"Task ID {task_id} not found") - else: - self._client.logger.warning( - f"Failed to delete task ID {task_id}: " - f"{response.msg} (status {response.status})" - ) + super().remove_by_ids(task_ids) def create_from_backup( self, diff --git a/cvat-sdk/cvat_sdk/core/uploading.py b/cvat-sdk/cvat_sdk/core/uploading.py index 0ccfd902da61..068e4d89a0c3 100644 --- a/cvat-sdk/cvat_sdk/core/uploading.py +++ b/cvat-sdk/cvat_sdk/core/uploading.py @@ -6,8 +6,9 @@ import json import os +from contextlib import AbstractContextManager from pathlib import Path -from typing import TYPE_CHECKING, Any, ContextManager, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Optional import requests import urllib3 @@ -147,9 +148,9 @@ def upload_file( url: str, filename: Path, *, - meta: Dict[str, Any], - query_params: Dict[str, Any] = None, - fields: Optional[Dict[str, Any]] = None, + meta: dict[str, Any], + query_params: dict[str, Any] = None, + fields: Optional[dict[str, Any]] = None, pbar: Optional[ProgressReporter] = None, logger=None, ) -> urllib3.HTTPResponse: @@ -194,7 +195,7 @@ def upload_file( return self._tus_finish_upload(url, query_params=query_params, fields=fields) @staticmethod - def _uploading_task(pbar: ProgressReporter, total_size: int) -> ContextManager[None]: + def _uploading_task(pbar: ProgressReporter, total_size: int) -> AbstractContextManager[None]: return pbar.task( total=total_size, desc="Uploading data", unit_scale=True, unit="B", unit_divisor=1024 ) @@ -256,7 +257,7 @@ def upload_file_and_wait( filename: Path, format_name: str, *, - url_params: Optional[Dict[str, Any]] = None, + url_params: Optional[dict[str, Any]] = None, pbar: Optional[ProgressReporter] = None, status_check_period: Optional[int] = None, ): @@ -279,7 +280,7 @@ def upload_file_and_wait( filename: Path, format_name: str, *, - url_params: Optional[Dict[str, Any]] = None, + url_params: Optional[dict[str, Any]] = None, pbar: Optional[ProgressReporter] = None, status_check_period: Optional[int] = None, ): @@ -302,7 +303,7 @@ def __init__(self, client: Client, *, max_request_size: int = MAX_REQUEST_SIZE): def upload_files( self, url: str, - resources: List[Path], + resources: list[Path], *, pbar: Optional[ProgressReporter] = None, **kwargs, @@ -351,10 +352,10 @@ def upload_files( return self._tus_finish_upload(url, fields=kwargs) def _split_files_by_requests( - self, filenames: List[Path] - ) -> Tuple[List[Tuple[List[Path], int]], List[Path], int]: - bulk_files: Dict[str, int] = {} - separate_files: Dict[str, int] = {} + self, filenames: list[Path] + ) -> tuple[list[tuple[list[Path], int]], list[Path], int]: + bulk_files: dict[str, int] = {} + separate_files: dict[str, int] = {} max_request_size = self.max_request_size # sort by size @@ -369,9 +370,9 @@ def _split_files_by_requests( total_size = sum(bulk_files.values()) + sum(separate_files.values()) # group small files by requests - bulk_file_groups: List[Tuple[List[str], int]] = [] + bulk_file_groups: list[tuple[list[str], int]] = [] current_group_size: int = 0 - current_group: List[str] = [] + current_group: list[str] = [] for filename, file_size in bulk_files.items(): if max_request_size < current_group_size + file_size: bulk_file_groups.append((current_group, current_group_size)) diff --git 
a/cvat-sdk/cvat_sdk/core/utils.py b/cvat-sdk/cvat_sdk/core/utils.py index 0706a2eec613..efcc787d96de 100644 --- a/cvat-sdk/cvat_sdk/core/utils.py +++ b/cvat-sdk/cvat_sdk/core/utils.py @@ -7,43 +7,32 @@ import contextlib import itertools import os -from typing import ( - IO, - Any, - BinaryIO, - ContextManager, - Dict, - Iterator, - Literal, - Sequence, - TextIO, - Union, - overload, -) +from collections.abc import Generator, Sequence +from typing import IO, Any, BinaryIO, Literal, TextIO, Union, overload def filter_dict( - d: Dict[str, Any], *, keep: Sequence[str] = None, drop: Sequence[str] = None -) -> Dict[str, Any]: + d: dict[str, Any], *, keep: Sequence[str] = None, drop: Sequence[str] = None +) -> dict[str, Any]: return {k: v for k, v in d.items() if (not keep or k in keep) and (not drop or k not in drop)} @overload def atomic_writer( path: Union[os.PathLike, str], mode: Literal["wb"] -) -> ContextManager[BinaryIO]: ... +) -> contextlib.AbstractContextManager[BinaryIO]: ... @overload def atomic_writer( path: Union[os.PathLike, str], mode: Literal["w"], encoding: str = "UTF-8" -) -> ContextManager[TextIO]: ... +) -> contextlib.AbstractContextManager[TextIO]: ... @contextlib.contextmanager def atomic_writer( path: Union[os.PathLike, str], mode: Literal["w", "wb"], encoding: str = "UTF-8" -) -> Iterator[IO]: +) -> Generator[IO, None, None]: """ Returns a context manager that, when entered, returns a handle to a temporary file opened with the specified `mode` and `encoding`. If the context manager diff --git a/cvat-sdk/cvat_sdk/datasets/caching.py b/cvat-sdk/cvat_sdk/datasets/caching.py index 08e0c123bfe1..f47cdfc3260f 100644 --- a/cvat-sdk/cvat_sdk/datasets/caching.py +++ b/cvat-sdk/cvat_sdk/datasets/caching.py @@ -6,9 +6,10 @@ import json import shutil from abc import ABCMeta, abstractmethod +from collections.abc import Mapping from enum import Enum, auto from pathlib import Path -from typing import Any, Callable, Dict, List, Mapping, Type, TypeVar, Union, cast +from typing import Any, Callable, TypeVar, Union, cast from attrs import define @@ -39,7 +40,7 @@ class UpdatePolicy(Enum): """ -_CacheObject = Dict[str, Any] +_CacheObject = dict[str, Any] class _CacheObjectModel(metaclass=ABCMeta): @@ -106,7 +107,7 @@ def _serialize_model(self, model: _ModelType) -> _CacheObject: else: raise NotImplementedError("Unexpected model type") - def load_model(self, path: Path, model_type: Type[_ModelType]) -> _ModelType: + def load_model(self, path: Path, model_type: type[_ModelType]) -> _ModelType: return self._deserialize_model(self._load_object(path), model_type) def save_model(self, path: Path, model: _ModelType) -> None: @@ -120,7 +121,7 @@ def ensure_task_model( self, task_id: int, filename: str, - model_type: Type[_ModelType], + model_type: type[_ModelType], downloader: Callable[[], _ModelType], model_description: str, ) -> _ModelType: ... 
@@ -166,7 +167,7 @@ def ensure_task_model( self, task_id: int, filename: str, - model_type: Type[_ModelType], + model_type: type[_ModelType], downloader: Callable[[], _ModelType], model_description: str, ) -> _ModelType: @@ -225,7 +226,7 @@ def ensure_task_model( self, task_id: int, filename: str, - model_type: Type[_ModelType], + model_type: type[_ModelType], downloader: Callable[[], _ModelType], model_description: str, ) -> _ModelType: @@ -247,7 +248,7 @@ def retrieve_project(self, project_id: int) -> Project: @define class _OfflineTaskModel(_CacheObjectModel): api_model: models.ITaskRead - labels: List[models.ILabel] + labels: list[models.ILabel] def dump(self) -> _CacheObject: return { @@ -278,15 +279,15 @@ def __init__( self._offline_model = cached_model self._cache_manager = cache_manager - def get_labels(self) -> List[models.ILabel]: + def get_labels(self) -> list[models.ILabel]: return self._offline_model.labels @define class _OfflineProjectModel(_CacheObjectModel): api_model: models.IProjectRead - task_ids: List[int] - labels: List[models.ILabel] + task_ids: list[int] + labels: list[models.ILabel] def dump(self) -> _CacheObject: return { @@ -320,14 +321,14 @@ def __init__( self._offline_model = cached_model self._cache_manager = cache_manager - def get_tasks(self) -> List[Task]: + def get_tasks(self) -> list[Task]: return [self._cache_manager.retrieve_task(t) for t in self._offline_model.task_ids] - def get_labels(self) -> List[models.ILabel]: + def get_labels(self) -> list[models.ILabel]: return self._offline_model.labels -_CACHE_MANAGER_CLASSES: Mapping[UpdatePolicy, Type[CacheManager]] = { +_CACHE_MANAGER_CLASSES: Mapping[UpdatePolicy, type[CacheManager]] = { UpdatePolicy.IF_MISSING_OR_STALE: _CacheManagerOnline, UpdatePolicy.NEVER: _CacheManagerOffline, } diff --git a/cvat-sdk/cvat_sdk/datasets/common.py b/cvat-sdk/cvat_sdk/datasets/common.py index b407c490802c..9b816e688bf4 100644 --- a/cvat-sdk/cvat_sdk/datasets/common.py +++ b/cvat-sdk/cvat_sdk/datasets/common.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: MIT import abc -from typing import List, Optional +from typing import Optional import attrs import attrs.validators @@ -24,8 +24,8 @@ class FrameAnnotations: Contains annotations that pertain to a single frame. 
""" - tags: List[models.LabeledImage] = attrs.Factory(list) - shapes: List[models.LabeledShape] = attrs.Factory(list) + tags: list[models.LabeledImage] = attrs.Factory(list) + shapes: list[models.LabeledShape] = attrs.Factory(list) class MediaElement(metaclass=abc.ABCMeta): diff --git a/cvat-sdk/cvat_sdk/datasets/task_dataset.py b/cvat-sdk/cvat_sdk/datasets/task_dataset.py index cf66fa7ab0ea..68424cbb3815 100644 --- a/cvat-sdk/cvat_sdk/datasets/task_dataset.py +++ b/cvat-sdk/cvat_sdk/datasets/task_dataset.py @@ -5,8 +5,8 @@ from __future__ import annotations import zipfile +from collections.abc import Iterable, Sequence from concurrent.futures import ThreadPoolExecutor -from typing import Iterable, Sequence import PIL.Image diff --git a/cvat-sdk/cvat_sdk/masks.py b/cvat-sdk/cvat_sdk/masks.py new file mode 100644 index 000000000000..f623aec7d043 --- /dev/null +++ b/cvat-sdk/cvat_sdk/masks.py @@ -0,0 +1,44 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import math +from collections.abc import Sequence + +import numpy as np +from numpy.typing import ArrayLike + + +def encode_mask(bitmap: ArrayLike, /, bbox: Sequence[float]) -> list[float]: + """ + Encodes an image mask into an array of numbers suitable for the "points" + attribute of a LabeledShapeRequest object of type "mask". + + bitmap must be a boolean array of shape (H, W), where H is the height and + W is the width of the image that the mask applies to. + + bbox must have the form [x1, y1, x2, y2], where (0, 0) <= (x1, y1) < (x2, y2) <= (W, H). + The mask will be limited to points between (x1, y1) and (x2, y2). + """ + + bitmap = np.asanyarray(bitmap) + if bitmap.ndim != 2: + raise ValueError("bitmap must have 2 dimensions") + if bitmap.dtype != np.bool_: + raise ValueError("bitmap must have boolean items") + + x1, y1 = map(math.floor, bbox[0:2]) + x2, y2 = map(math.ceil, bbox[2:4]) + + if not (0 <= x1 < x2 <= bitmap.shape[1] and 0 <= y1 < y2 <= bitmap.shape[0]): + raise ValueError("bbox has invalid coordinates") + + flat = bitmap[y1:y2, x1:x2].ravel() + + (run_indices,) = np.diff(flat, prepend=[not flat[0]], append=[not flat[-1]]).nonzero() + if flat[0]: + run_lengths = np.diff(run_indices, prepend=[0]) + else: + run_lengths = np.diff(run_indices) + + return run_lengths.tolist() + [x1, y1, x2 - 1, y2 - 1] diff --git a/cvat-sdk/cvat_sdk/pytorch/common.py b/cvat-sdk/cvat_sdk/pytorch/common.py index 97ef38bc33a8..0c208cfc0bd4 100644 --- a/cvat-sdk/cvat_sdk/pytorch/common.py +++ b/cvat-sdk/cvat_sdk/pytorch/common.py @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: MIT -from typing import Mapping +from collections.abc import Mapping import attrs diff --git a/cvat-sdk/cvat_sdk/pytorch/project_dataset.py b/cvat-sdk/cvat_sdk/pytorch/project_dataset.py index ada554ee1210..7548d9e233a0 100644 --- a/cvat-sdk/cvat_sdk/pytorch/project_dataset.py +++ b/cvat-sdk/cvat_sdk/pytorch/project_dataset.py @@ -3,7 +3,8 @@ # SPDX-License-Identifier: MIT import os -from typing import Callable, Container, Mapping, Optional +from collections.abc import Container, Mapping +from typing import Callable, Optional import torch import torch.utils.data diff --git a/cvat-sdk/cvat_sdk/pytorch/task_dataset.py b/cvat-sdk/cvat_sdk/pytorch/task_dataset.py index 8964d2db47db..8434102d9e63 100644 --- a/cvat-sdk/cvat_sdk/pytorch/task_dataset.py +++ b/cvat-sdk/cvat_sdk/pytorch/task_dataset.py @@ -4,7 +4,8 @@ import os import types -from typing import Callable, Mapping, Optional +from collections.abc import Mapping +from typing import Callable, 
diff --git a/cvat-sdk/cvat_sdk/pytorch/common.py b/cvat-sdk/cvat_sdk/pytorch/common.py
index 97ef38bc33a8..0c208cfc0bd4 100644
--- a/cvat-sdk/cvat_sdk/pytorch/common.py
+++ b/cvat-sdk/cvat_sdk/pytorch/common.py
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: MIT

-from typing import Mapping
+from collections.abc import Mapping

 import attrs
diff --git a/cvat-sdk/cvat_sdk/pytorch/project_dataset.py b/cvat-sdk/cvat_sdk/pytorch/project_dataset.py
index ada554ee1210..7548d9e233a0 100644
--- a/cvat-sdk/cvat_sdk/pytorch/project_dataset.py
+++ b/cvat-sdk/cvat_sdk/pytorch/project_dataset.py
@@ -3,7 +3,8 @@
 # SPDX-License-Identifier: MIT

 import os
-from typing import Callable, Container, Mapping, Optional
+from collections.abc import Container, Mapping
+from typing import Callable, Optional

 import torch
 import torch.utils.data
diff --git a/cvat-sdk/cvat_sdk/pytorch/task_dataset.py b/cvat-sdk/cvat_sdk/pytorch/task_dataset.py
index 8964d2db47db..8434102d9e63 100644
--- a/cvat-sdk/cvat_sdk/pytorch/task_dataset.py
+++ b/cvat-sdk/cvat_sdk/pytorch/task_dataset.py
@@ -4,7 +4,8 @@

 import os
 import types
-from typing import Callable, Mapping, Optional
+from collections.abc import Mapping
+from typing import Callable, Optional

 import torchvision.datasets
diff --git a/cvat-sdk/cvat_sdk/pytorch/transforms.py b/cvat-sdk/cvat_sdk/pytorch/transforms.py
index 1fb99362defc..5c8a4f7390cb 100644
--- a/cvat-sdk/cvat_sdk/pytorch/transforms.py
+++ b/cvat-sdk/cvat_sdk/pytorch/transforms.py
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: MIT

-from typing import FrozenSet, TypedDict
+from typing import TypedDict

 import attrs
 import attrs.validators
@@ -63,7 +63,7 @@ class ExtractBoundingBoxes:
     * Rotated shapes are not supported.
     """

-    include_shape_types: FrozenSet[str] = attrs.field(
+    include_shape_types: frozenset[str] = attrs.field(
         converter=frozenset,
         validator=attrs.validators.deep_iterable(attrs.validators.in_(_SUPPORTED_SHAPE_TYPES)),
         kw_only=True,
diff --git a/cvat-sdk/gen/generate.sh b/cvat-sdk/gen/generate.sh
index ca9a08be98fe..939ac9d65b44 100755
--- a/cvat-sdk/gen/generate.sh
+++ b/cvat-sdk/gen/generate.sh
@@ -8,7 +8,7 @@
 set -e

 GENERATOR_VERSION="v6.0.1"

-VERSION="2.22.0"
+VERSION="2.24.1"
 LIB_NAME="cvat_sdk"
 LAYER1_LIB_NAME="${LIB_NAME}/api_client"
 DST_DIR="$(cd "$(dirname -- "$0")/.." && pwd)"
diff --git a/cvat-sdk/gen/generator-config.yml b/cvat-sdk/gen/generator-config.yml
index 26e78cb8a3a9..82f46fda971a 100644
--- a/cvat-sdk/gen/generator-config.yml
+++ b/cvat-sdk/gen/generator-config.yml
@@ -4,7 +4,7 @@ additionalProperties:
  packageName: "cvat_sdk.api_client"
  initRequiredVars: true
  generateSourceCodeOnly: false
-  generatorLanguageVersion: '>=3.8'
+  generatorLanguageVersion: '>=3.9'
 globalProperties:
  generateAliasAsModel: true
  apiTests: false
diff --git a/cvat-sdk/gen/postprocess.py b/cvat-sdk/gen/postprocess.py
index 779dc2e10326..d45d9680dc2f 100755
--- a/cvat-sdk/gen/postprocess.py
+++ b/cvat-sdk/gen/postprocess.py
@@ -30,8 +30,8 @@ def collect_operations(schema):


 class Replacer:
-    REPLACEMENT_TOKEN = r"%%%"
-    ARGS_TOKEN = r"!!!"
+    REPLACEMENT_TOKEN = r"%%%"  # nosec: hardcoded_password_string
+    ARGS_TOKEN = r"!!!"
# nosec: hardcoded_password_string def __init__(self, schema): self._schema = schema @@ -57,9 +57,9 @@ def make_api_name(self, name: str) -> str: return underscore(name) def make_type_annotation(self, type_repr: str) -> str: - type_repr = type_repr.replace("[", "typing.List[") + type_repr = type_repr.replace("[", "list[") type_repr = type_repr.replace("(", "typing.Union[").replace(")", "]") - type_repr = type_repr.replace("{", "typing.Dict[").replace(":", ",").replace("}", "]") + type_repr = type_repr.replace("{", "dict[").replace(":", ",").replace("}", "]") ANY_pattern = "bool, date, datetime, dict, float, int, list, str" type_repr = type_repr.replace(ANY_pattern, "typing.Any") diff --git a/cvat-sdk/gen/requirements.txt b/cvat-sdk/gen/requirements.txt index 18f397e59dc6..54c28f0b0007 100644 --- a/cvat-sdk/gen/requirements.txt +++ b/cvat-sdk/gen/requirements.txt @@ -1,5 +1,4 @@ # can't have a dependency on base.txt, because it depends on the generated file inflection >= 0.5.1 -isort>=5.10.1 ruamel.yaml>=0.17.21 diff --git a/cvat-sdk/gen/templates/openapi-generator/api.mustache b/cvat-sdk/gen/templates/openapi-generator/api.mustache index 160d641bc305..aa7c3ac686a1 100644 --- a/cvat-sdk/gen/templates/openapi-generator/api.mustache +++ b/cvat-sdk/gen/templates/openapi-generator/api.mustache @@ -240,10 +240,10 @@ class {{classname}}(object): _spec_property_naming: bool = False, _content_type: typing.Optional[str] = None, _host_index: typing.Optional[int] = None, - _request_auths: typing.Optional[typing.List] = None, + _request_auths: typing.Optional[list] = None, _async_call: bool = False, **kwargs, - ) -> typing.Tuple[typing.Optional[{{>return_type}}], urllib3.HTTPResponse]: + ) -> tuple[typing.Optional[{{>return_type}}], urllib3.HTTPResponse]: """{{{summary}}}{{^summary}}{{>operation_name}}{{/summary}} # noqa: E501 {{#notes}} diff --git a/cvat-sdk/gen/templates/openapi-generator/api_client.mustache b/cvat-sdk/gen/templates/openapi-generator/api_client.mustache index 436bd26f2d54..d49af604ce94 100644 --- a/cvat-sdk/gen/templates/openapi-generator/api_client.mustache +++ b/cvat-sdk/gen/templates/openapi-generator/api_client.mustache @@ -68,8 +68,8 @@ class ApiClient(object): def __init__(self, configuration: typing.Optional[Configuration] = None, - headers: typing.Optional[typing.Dict[str, str]] = None, - cookies: typing.Optional[typing.Dict[str, str]] = None, + headers: typing.Optional[dict[str, str]] = None, + cookies: typing.Optional[dict[str, str]] = None, pool_threads: int = 1): """ :param configuration: configuration object for this client @@ -85,7 +85,7 @@ class ApiClient(object): self.pool_threads = pool_threads self.rest_client = rest.RESTClientObject(configuration) - self.default_headers: typing.Dict[str, str] = headers or {} + self.default_headers: dict[str, str] = headers or {} self.cookies = SimpleCookie() if cookies: self.cookies.update(cookies) @@ -161,22 +161,22 @@ class ApiClient(object): self, resource_path: str, method: str, - path_params: typing.Optional[typing.Dict[str, typing.Any]] = None, - query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None, - header_params: typing.Optional[typing.Dict[str, typing.Any]] = None, + path_params: typing.Optional[dict[str, typing.Any]] = None, + query_params: typing.Optional[list[tuple[str, typing.Any]]] = None, + header_params: typing.Optional[dict[str, typing.Any]] = None, body: typing.Optional[typing.Any] = None, - post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None, - files: 
typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None, - response_schema: typing.Optional[typing.Tuple[typing.Any]] = None, - auth_settings: typing.Optional[typing.List[str]] = None, - collection_formats: typing.Optional[typing.Dict[str, str]] = None, + post_params: typing.Optional[list[tuple[str, typing.Any]]] = None, + files: typing.Optional[dict[str, list[io.IOBase]]] = None, + response_schema: typing.Optional[tuple[typing.Any]] = None, + auth_settings: typing.Optional[list[str]] = None, + collection_formats: typing.Optional[dict[str, str]] = None, *, _parse_response: bool = True, - _request_timeout: typing.Optional[typing.Union[int, float, typing.Tuple]] = None, + _request_timeout: typing.Optional[typing.Union[int, float, tuple]] = None, _host: typing.Optional[str] = None, _check_type: typing.Optional[bool] = None, _check_status: bool = True, - _request_auths: typing.Optional[typing.List[typing.Dict[str, typing.Any]]] = None + _request_auths: typing.Optional[list[dict[str, typing.Any]]] = None ): config = self.configuration @@ -271,7 +271,7 @@ class ApiClient(object): return (return_data, response) {{/tornado}} - def get_common_headers(self) -> typing.Dict[str, str]: + def get_common_headers(self) -> dict[str, str]: """ Returns a headers dict with all the required headers for requests """ @@ -324,7 +324,7 @@ class ApiClient(object): """ return to_json(obj, read_files=read_files) - def deserialize(self, response: HTTPResponse, response_schema: typing.Tuple, *, _check_type: bool): + def deserialize(self, response: HTTPResponse, response_schema: tuple, *, _check_type: bool): """Deserializes response into an object. :param response (urllib3.HTTPResponse): object to be deserialized. @@ -384,22 +384,22 @@ class ApiClient(object): self, resource_path: str, method: str, - path_params: typing.Optional[typing.Dict[str, typing.Any]] = None, - query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None, - header_params: typing.Optional[typing.Dict[str, typing.Any]] = None, + path_params: typing.Optional[dict[str, typing.Any]] = None, + query_params: typing.Optional[list[tuple[str, typing.Any]]] = None, + header_params: typing.Optional[dict[str, typing.Any]] = None, body: typing.Optional[typing.Any] = None, - post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None, - files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None, - response_schema: typing.Optional[typing.Tuple[typing.Any]] = None, - auth_settings: typing.Optional[typing.List[str]] = None, - collection_formats: typing.Optional[typing.Dict[str, str]] = None, + post_params: typing.Optional[list[tuple[str, typing.Any]]] = None, + files: typing.Optional[dict[str, list[io.IOBase]]] = None, + response_schema: typing.Optional[tuple[typing.Any]] = None, + auth_settings: typing.Optional[list[str]] = None, + collection_formats: typing.Optional[dict[str, str]] = None, *, _async_call: typing.Optional[bool] = None, _parse_response: bool = True, - _request_timeout: typing.Optional[typing.Union[int, float, typing.Tuple]] = None, + _request_timeout: typing.Optional[typing.Union[int, float, tuple]] = None, _host: typing.Optional[str] = None, _check_type: typing.Optional[bool] = None, - _request_auths: typing.Optional[typing.List[typing.Dict[str, typing.Any]]] = None, + _request_auths: typing.Optional[list[dict[str, typing.Any]]] = None, _check_status: bool = True, ): """Makes the HTTP request (synchronous) and returns deserialized data. 
@@ -580,7 +580,7 @@ class ApiClient(object): new_params.append((k, v)) return new_params - def _serialize_file(self, file_instance: io.IOBase) -> typing.Tuple[str, typing.Union[str, bytes], str]: + def _serialize_file(self, file_instance: io.IOBase) -> tuple[str, typing.Union[str, bytes], str]: if file_instance.closed is True: raise ApiValueError("Cannot read a closed file.") filename = os.path.basename(file_instance.name) @@ -592,8 +592,7 @@ class ApiClient(object): return filename, filedata, mimetype def files_parameters(self, - files: typing.Optional[typing.Dict[str, - typing.List[io.IOBase]]] = None): + files: typing.Optional[dict[str, list[io.IOBase]]] = None): """Builds form parameters. :param files: None or a dict with key=param_name and @@ -714,7 +713,7 @@ class ApiClient(object): {{#apiInfo}}{{#apis}} {{>api_name}}: '{{classname}}'{{/apis}}{{/apiInfo}} - _apis: typing.Dict[str, object] = { {{#apiInfo}}{{#apis}} + _apis: dict[str, object] = { {{#apiInfo}}{{#apis}} '{{>api_name}}': [None, '{{classname}}'],{{/apis}}{{/apiInfo}} } @@ -739,10 +738,10 @@ class ApiClient(object): class Endpoint(object): def __init__(self, - settings: typing.Optional[typing.Dict[str, typing.Any]] = None, - params_map: typing.Optional[typing.Dict[str, typing.Any]] = None, - root_map: typing.Optional[typing.Dict[str, typing.Any]] = None, - headers_map: typing.Optional[typing.Dict[str, typing.Any]] = None, + settings: typing.Optional[dict[str, typing.Any]] = None, + params_map: typing.Optional[dict[str, typing.Any]] = None, + root_map: typing.Optional[dict[str, typing.Any]] = None, + headers_map: typing.Optional[dict[str, typing.Any]] = None, api_client: typing.Optional[ApiClient] = None ): """Creates an endpoint @@ -897,9 +896,9 @@ class Endpoint(object): _spec_property_naming: bool = False, _content_type: typing.Optional[str] = None, _host_index: typing.Optional[int] = None, - _request_auths: typing.Optional[typing.List] = None, + _request_auths: typing.Optional[list] = None, _async_call: bool = False, - **kwargs) -> typing.Tuple[typing.Optional[typing.Any], HTTPResponse]: + **kwargs) -> tuple[typing.Optional[typing.Any], HTTPResponse]: """ Keyword Args: endpoint args diff --git a/cvat-sdk/gen/templates/openapi-generator/configuration.mustache b/cvat-sdk/gen/templates/openapi-generator/configuration.mustache index cec0c548f1d7..e66aec294afc 100644 --- a/cvat-sdk/gen/templates/openapi-generator/configuration.mustache +++ b/cvat-sdk/gen/templates/openapi-generator/configuration.mustache @@ -169,8 +169,8 @@ class Configuration: def __init__(self, host: typing.Optional[str] = None, - api_key: typing.Optional[typing.Dict[str, str]] = None, - api_key_prefix: typing.Optional[typing.Dict[str, str]] = None, + api_key: typing.Optional[dict[str, str]] = None, + api_key_prefix: typing.Optional[dict[str, str]] = None, username: typing.Optional[str] = None, password: typing.Optional[str]=None, discard_unknown_keys: bool = False, @@ -179,9 +179,9 @@ class Configuration: signing_info=None, {{/hasHttpSignatureMethods}} server_index: typing.Optional[int] = None, - server_variables: typing.Optional[typing.Dict[str, str]] = None, + server_variables: typing.Optional[dict[str, str]] = None, server_operation_index: typing.Optional[int] = None, - server_operation_variables: typing.Optional[typing.Dict[str, str]] = None, + server_operation_variables: typing.Optional[dict[str, str]] = None, ssl_ca_cert: typing.Optional[str] = None, verify_ssl: typing.Optional[bool] = None, ) -> None: diff --git 
a/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_composed.mustache b/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_composed.mustache index 97d3cb930c27..e56437b401ee 100644 --- a/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_composed.mustache +++ b/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_composed.mustache @@ -22,7 +22,6 @@ {{name}} ({{{dataType}}}):{{#description}} {{{.}}}.{{/description}} [optional]{{#defaultValue}} if omitted the server will use the default value of {{{.}}}{{/defaultValue}} # noqa: E501 {{/optionalVars}} """ - from {{packageName}}.configuration import Configuration {{#requiredVars}} {{#defaultValue}} @@ -32,7 +31,7 @@ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) - _configuration = kwargs.pop('_configuration', Configuration()) + _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) diff --git a/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_shared.mustache b/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_shared.mustache index 4c149f22ce88..12dbba9ac641 100644 --- a/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_shared.mustache +++ b/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_shared.mustache @@ -27,7 +27,6 @@ {{/optionalVars}} {{> model_templates/docstring_init_required_kwargs }} """ - from {{packageName}}.configuration import Configuration {{#requiredVars}} {{#defaultValue}} @@ -37,7 +36,7 @@ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', True) _path_to_item = kwargs.pop('_path_to_item', ()) - _configuration = kwargs.pop('_configuration', Configuration()) + _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) diff --git a/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_simple.mustache b/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_simple.mustache index 853532e9f5ca..e8daa85e829c 100644 --- a/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_simple.mustache +++ b/cvat-sdk/gen/templates/openapi-generator/model_templates/method_from_openapi_data_simple.mustache @@ -12,7 +12,6 @@ value ({{{dataType}}}):{{#description}} {{{.}}}.{{/description}}{{#defaultValue}} if omitted defaults to {{{.}}}{{/defaultValue}}{{#allowableValues}}, must be one of [{{#enumVars}}{{{value}}}, {{/enumVars}}]{{/allowableValues}} # noqa: E501 {{> model_templates/docstring_init_required_kwargs }} """ - from {{packageName}}.configuration import Configuration # required up here when default value is not given _path_to_item = kwargs.pop('_path_to_item', ()) @@ -39,7 +38,7 @@ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) - _configuration = kwargs.pop('_configuration', Configuration()) + _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) {{> model_templates/invalid_pos_args }} diff 
--git a/cvat-sdk/gen/templates/openapi-generator/model_templates/method_init_shared.mustache b/cvat-sdk/gen/templates/openapi-generator/model_templates/method_init_shared.mustache index 998b4841b7e7..c7d402a6cc52 100644 --- a/cvat-sdk/gen/templates/openapi-generator/model_templates/method_init_shared.mustache +++ b/cvat-sdk/gen/templates/openapi-generator/model_templates/method_init_shared.mustache @@ -30,7 +30,6 @@ {{/optionalVars}} {{> model_templates/docstring_init_required_kwargs }} """ - from {{packageName}}.configuration import Configuration {{#requiredVars}} {{^isReadOnly}} @@ -42,7 +41,7 @@ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) - _configuration = kwargs.pop('_configuration', Configuration()) + _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) {{> model_templates/invalid_pos_args }} diff --git a/cvat-sdk/gen/templates/openapi-generator/model_templates/method_init_simple.mustache b/cvat-sdk/gen/templates/openapi-generator/model_templates/method_init_simple.mustache index 8c8b42ce1f49..424b1d439c62 100644 --- a/cvat-sdk/gen/templates/openapi-generator/model_templates/method_init_simple.mustache +++ b/cvat-sdk/gen/templates/openapi-generator/model_templates/method_init_simple.mustache @@ -20,7 +20,6 @@ value ({{{dataType}}}):{{#description}} {{{.}}}.{{/description}}{{#defaultValue}} if omitted defaults to {{{.}}}{{/defaultValue}}{{#allowableValues}}, must be one of [{{#enumVars}}{{{value}}}, {{/enumVars}}]{{/allowableValues}} # noqa: E501 {{> model_templates/docstring_init_required_kwargs }} """ - from {{packageName}}.configuration import Configuration # required up here when default value is not given _path_to_item = kwargs.pop('_path_to_item', ()) @@ -45,7 +44,7 @@ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) - _configuration = kwargs.pop('_configuration', Configuration()) + _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) {{> model_templates/invalid_pos_args }} diff --git a/cvat-sdk/gen/templates/openapi-generator/model_utils.mustache b/cvat-sdk/gen/templates/openapi-generator/model_utils.mustache index c9e2b70d77bb..cc3c03dbce77 100644 --- a/cvat-sdk/gen/templates/openapi-generator/model_utils.mustache +++ b/cvat-sdk/gen/templates/openapi-generator/model_utils.mustache @@ -354,6 +354,13 @@ class OpenApiModel(object): new_inst = new_cls._new_from_openapi_data(*args, **kwargs) return new_inst + def __setstate__(self, state): + # This is the same as the default implementation. We override it, + # because unpickling attempts to access `obj.__setstate__` on an uninitialized + # object, and if this method is not defined, it results in a call to `__getattr__`. + # This fails, because `__getattr__` relies on `self._data_store`, which doesn't + # exist in an uninitialized object. 
+        self.__dict__.update(state)

 class ModelSimple(OpenApiModel):
     """the parent class of models whose type != object in their
@@ -1084,7 +1091,7 @@ def deserialize_file(response_data, configuration, content_disposition=None):
         (file_type): the deserialized file which is open
             The user is responsible for closing and reading the file
     """
-    fd, path = tempfile.mkstemp(dir=configuration.temp_folder_path)
+    fd, path = tempfile.mkstemp(dir=configuration.temp_folder_path if configuration else None)
     os.close(fd)
     os.remove(path)
@@ -1263,27 +1270,21 @@ def validate_and_convert_types(input_value, required_types_mixed, path_to_item,
     input_class_simple = get_simple_class(input_value)
     valid_type = is_valid_type(input_class_simple, valid_classes)
     if not valid_type:
-        if (configuration
-                or (input_class_simple == dict
-                    and dict not in valid_classes)):
-            # if input_value is not valid_type try to convert it
-            converted_instance = attempt_convert_item(
-                input_value,
-                valid_classes,
-                path_to_item,
-                configuration,
-                spec_property_naming,
-                key_type=False,
-                must_convert=True,
-                check_type=_check_type
-            )
-            return converted_instance
-        else:
-            raise get_type_error(input_value, path_to_item, valid_classes,
-                                 key_type=False)
+        # if input_value is not valid_type try to convert it
+        converted_instance = attempt_convert_item(
+            input_value,
+            valid_classes,
+            path_to_item,
+            configuration,
+            spec_property_naming,
+            key_type=False,
+            must_convert=True,
+            check_type=_check_type
+        )
+        return converted_instance

     # input_value's type is in valid_classes
-    if len(valid_classes) > 1 and configuration:
+    if len(valid_classes) > 1:
         # there are valid classes which are not the current class
         valid_classes_coercible = remove_uncoercible(
             valid_classes, input_value, spec_property_naming, must_convert=False)
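A minimal, generic reproduction of the unpickling pitfall that the `__setstate__` comment above describes (plain Python, not the generated SDK code):

```python
# Plain-Python sketch of the pitfall: pickle creates the object without
# calling __init__ and then looks up __setstate__ on it; without the
# override, that lookup falls through to __getattr__, which needs
# instance state that does not exist yet.
import pickle

class Model:
    def __init__(self):
        self._data_store = {"a": 1}

    def __getattr__(self, name):
        # Relies on _data_store, which is missing right after unpickling.
        return self.__dict__["_data_store"][name]

    def __setstate__(self, state):
        # Same as the default behaviour, but found on the class, so the
        # lookup never reaches __getattr__.
        self.__dict__.update(state)

restored = pickle.loads(pickle.dumps(Model()))
assert restored.a == 1
```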
diff --git a/cvat-sdk/gen/templates/openapi-generator/setup.mustache b/cvat-sdk/gen/templates/openapi-generator/setup.mustache
index eb89f5d20554..e0379cabd06e 100644
--- a/cvat-sdk/gen/templates/openapi-generator/setup.mustache
+++ b/cvat-sdk/gen/templates/openapi-generator/setup.mustache
@@ -77,7 +77,8 @@ setup(
     python_requires="{{{generatorLanguageVersion}}}",
     install_requires=BASE_REQUIREMENTS,
     extras_require={
-        "pytorch": ['torch', 'torchvision'],
+        "masks": ["numpy>=2"],
+        "pytorch": ['torch', 'torchvision', 'scikit-image>=0.24', 'cvat_sdk[masks]'],
     },
     package_dir={"": "."},
     packages=find_packages(include=["cvat_sdk*"]),
diff --git a/cvat-ui/package.json b/cvat-ui/package.json
index 81b392eb7e54..ce374b2e2be6 100644
--- a/cvat-ui/package.json
+++ b/cvat-ui/package.json
@@ -1,6 +1,6 @@
 {
   "name": "cvat-ui",
-  "version": "1.66.1",
+  "version": "1.67.0",
   "description": "CVAT single-page application",
   "main": "src/index.tsx",
   "scripts": {
@@ -21,7 +21,7 @@
   "license": "MIT",
   "dependencies": {
     "@ant-design/compatible": "^5.1.2",
-    "@ant-design/icons": "^4.6.3",
+    "@ant-design/icons": "^5.5.2",
     "@react-awesome-query-builder/antd": "^6.5.2",
     "@types/json-logic-js": "^2.0.2",
     "@types/lru-cache": "^7.10.10",
diff --git a/cvat-ui/react_nginx.conf b/cvat-ui/react_nginx.conf
index 5f1f4b48997a..6f9437ebbd75 100644
--- a/cvat-ui/react_nginx.conf
+++ b/cvat-ui/react_nginx.conf
@@ -1,7 +1,7 @@
 server {
     root /usr/share/nginx/html;

-    # Disable server signature to make it slighty harder for
+    # Disable server signature to make it slightly harder for
     # attackers to find known vulnerabilities. See
     # https://datatracker.ietf.org/doc/html/rfc9110#name-server
     server_tokens off;
diff --git a/cvat-ui/src/actions/annotation-actions.ts b/cvat-ui/src/actions/annotation-actions.ts
index 9e3eeb8176b3..115470429990 100644
--- a/cvat-ui/src/actions/annotation-actions.ts
+++ b/cvat-ui/src/actions/annotation-actions.ts
@@ -12,7 +12,7 @@ import {
 } from 'cvat-canvas-wrapper';
 import {
     getCore, MLModel, JobType, Job, QualityConflict,
-    ObjectState, JobState, ValidationLayout,
+    ObjectState, JobState, JobValidationLayout,
 } from 'cvat-core-wrapper';
 import logger, { EventScope } from 'cvat-logger';
 import { getCVATStore } from 'cvat-store';
@@ -38,7 +38,7 @@ interface AnnotationsParameters {
     showGroundTruth: boolean;
     jobInstance: Job;
     groundTruthInstance: Job | null;
-    validationLayout: ValidationLayout | null;
+    validationLayout: JobValidationLayout | null;
 }

 const cvat = getCore();
@@ -126,6 +126,8 @@ export enum AnnotationActionTypes {
     COLLAPSE_APPEARANCE = 'COLLAPSE_APPEARANCE',
     COLLAPSE_OBJECT_ITEMS = 'COLLAPSE_OBJECT_ITEMS',
     ACTIVATE_OBJECT = 'ACTIVATE_OBJECT',
+    UPDATE_EDITED_STATE = 'UPDATE_EDITED_STATE',
+    HIDE_ACTIVE_OBJECT = 'HIDE_ACTIVE_OBJECT',
     REMOVE_OBJECT = 'REMOVE_OBJECT',
     REMOVE_OBJECT_SUCCESS = 'REMOVE_OBJECT_SUCCESS',
     REMOVE_OBJECT_FAILED = 'REMOVE_OBJECT_FAILED',
@@ -450,6 +452,7 @@ export function propagateObjectAsync(from: number, to: number): ThunkAction {
             const {
                 job: {
                     instance: sessionInstance,
+                    frameNumbers,
                 },
                 annotations: {
                     activatedStateID,
@@ -463,12 +466,17 @@ export function propagateObjectAsync(from: number, to: number): ThunkAction {
                 throw new Error('There is not an activated object state to be propagated');
             }

-            await sessionInstance.logger.log(EventScope.propagateObject, { count: Math.abs(to - from) });
-            const states = cvat.utils.propagateShapes([objectState], from, to);
+            if (!sessionInstance) {
+                throw new Error('SessionInstance is not defined, propagation is not possible');
+            }

-            await sessionInstance.annotations.put(states);
-            const history = await sessionInstance.actions.get();
+            const states = cvat.utils.propagateShapes([objectState], from, to, frameNumbers);

+            if (states.length) {
+                await sessionInstance.logger.log(EventScope.propagateObject, { count: states.length });
+                await sessionInstance.annotations.put(states);
+            }

+            const history = await sessionInstance.actions.get();
             dispatch({
                 type: AnnotationActionTypes.PROPAGATE_OBJECT_SUCCESS,
                 payload: { history },
@@ -594,10 +602,10 @@ export function confirmCanvasReadyAsync(): ThunkAction {
     return async (dispatch: ThunkDispatch, getState: () => CombinedState): Promise<void> => {
         try {
             const state: CombinedState = getState();
-            const { instance: job } = state.annotation.job;
+            const job = state.annotation.job.instance as Job;
+            const includedFrames = state.annotation.job.frameNumbers;
             const { changeFrameEvent } = state.annotation.player.frame;
             const chunks = await job.frames.cachedChunks() as number[];
-            const includedFrames = await job.frames.frameNumbers() as number[];
             const { frameCount, dataChunkSize } = job;

             const ranges = chunks.map((chunk) => (
@@ -914,7 +922,6 @@ export function getJobAsync({
                 }
             }

-            const jobMeta = await cvat.frames.getMeta('job', job.id);
             // frame query parameter does not work for GT job
             const frameNumber = Number.isInteger(initialFrame) && gtJob?.id !== job.id ?
                 initialFrame as number :
                 (await job.frames.search( )) || job.startFrame;

             const frameData = await job.frames.get(frameNumber);
+            const jobMeta = await cvat.frames.getMeta('job', job.id);
+            const frameNumbers = await job.frames.frameNumbers();
             try {
                 // call first getting of frame data before rendering interface
                 // to load and decode first chunk
@@ -960,6 +969,7 @@ export function getJobAsync({
                 payload: {
                     openTime,
                     job,
+                    frameNumbers,
                     jobMeta,
                     queryParameters,
                     groundTruthInstance: gtJob || null,
@@ -1071,7 +1081,7 @@ export function finishCurrentJobAsync(): ThunkAction {
 export function rememberObject(createParams: {
     activeObjectType?: ObjectType;
     activeLabelID?: number;
-    activeShapeType?: ShapeType;
+    activeShapeType?: ShapeType | null;
     activeNumOfPoints?: number;
     activeRectDrawingMethod?: RectDrawingMethod;
     activeCuboidDrawingMethod?: CuboidDrawingMethod;
@@ -1320,7 +1330,7 @@ export function searchAnnotationsAsync(
     };
 }

-const ShapeTypeToControl: Record<ShapeType, ActiveControl> = {
+export const ShapeTypeToControl: Record<ShapeType, ActiveControl> = {
     [ShapeType.RECTANGLE]: ActiveControl.DRAW_RECTANGLE,
     [ShapeType.POLYLINE]: ActiveControl.DRAW_POLYLINE,
     [ShapeType.POLYGON]: ActiveControl.DRAW_POLYGON,
@@ -1608,3 +1618,50 @@ export function restoreFrameAsync(frame: number): ThunkAction {
         }
     };
 }
+
+export function changeHideActiveObjectAsync(hide: boolean): ThunkAction {
+    return async (dispatch: ThunkDispatch, getState): Promise<void> => {
+        const state = getState();
+        const { instance: canvas } = state.annotation.canvas;
+        if (canvas) {
+            (canvas as Canvas).configure({
+                hideEditedObject: hide,
+            });
+
+            const { objectState } = state.annotation.editing;
+            if (objectState) {
+                objectState.hidden = hide;
+                await dispatch(updateAnnotationsAsync([objectState]));
+            }
+
+            dispatch({
+                type: AnnotationActionTypes.HIDE_ACTIVE_OBJECT,
+                payload: {
+                    hide,
+                },
+            });
+        }
+    };
+}
+
+export function updateEditedStateAsync(objectState: ObjectState | null): ThunkAction {
+    return async (dispatch: ThunkDispatch, getState): Promise<void> => {
+        let newActiveObjectHidden = false;
+        if (objectState) {
+            newActiveObjectHidden = objectState.hidden;
+        }
+
+        dispatch({
+            type: AnnotationActionTypes.UPDATE_EDITED_STATE,
+            payload: {
+                objectState,
+            },
+        });
+
+        const state = getState();
+        const { activeObjectHidden } = state.annotation.canvas;
+        if (activeObjectHidden !== newActiveObjectHidden) {
+            dispatch(changeHideActiveObjectAsync(newActiveObjectHidden));
+        }
+    };
+}
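Note that propagateShapes now receives the job's actual frame numbers, so shapes are only copied to frames that really exist in the job (relevant for ground truth jobs, whose frames are non-contiguous). A language-agnostic sketch of that selection step (Python for consistency with the SDK examples; the helper name and shape representation are hypothetical, and cvat.utils.propagateShapes may differ in detail):

```python
# A minimal sketch of the frame-selection logic described above;
# shapes are copied only onto frames that belong to the job.
def propagate_shapes(shapes, from_frame, to_frame, frame_numbers):
    target_frames = [
        frame for frame in frame_numbers
        if from_frame < frame <= to_frame
    ]

    states = []
    for frame in target_frames:
        for shape in shapes:
            copy = dict(shape)      # shallow copy of the annotation
            copy["frame"] = frame   # retarget it to the new frame
            states.append(copy)
    return states                   # may be empty; the event logs len(states)
```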
diff --git a/cvat-ui/src/actions/export-actions.ts b/cvat-ui/src/actions/export-actions.ts
index db59a6315c6a..6872e4f853c0 100644
--- a/cvat-ui/src/actions/export-actions.ts
+++ b/cvat-ui/src/actions/export-actions.ts
@@ -4,11 +4,12 @@
 // SPDX-License-Identifier: MIT

 import { ActionUnion, createAction, ThunkAction } from 'utils/redux';
-
-import { Storage, ProjectOrTaskOrJob, Job } from 'cvat-core-wrapper';
 import {
-    getInstanceType, RequestInstanceType, listen, RequestsActions,
-    shouldListenForProgress,
+    Storage, ProjectOrTaskOrJob, Job, getCore, StorageLocation,
+} from 'cvat-core-wrapper';
+import {
+    getInstanceType, RequestInstanceType, listen,
+    RequestsActions, updateRequestProgress,
 } from './requests-actions';

 export enum ExportActionTypes {
@@ -24,6 +25,8 @@ export enum ExportActionTypes {
     EXPORT_BACKUP_FAILED = 'EXPORT_BACKUP_FAILED',
 }

+const core = getCore();
+
 export const exportActions = {
     openExportDatasetModal: (instance: ProjectOrTaskOrJob) => (
         createAction(ExportActionTypes.OPEN_EXPORT_DATASET_MODAL, { instance })
@@ -36,7 +39,7 @@ export const exportActions = {
         instanceType: 'project' | 'task' | 'job',
         format: string,
         resource: 'dataset' | 'annotations',
-        target?: 'local' | 'cloudstorage',
+        target?: StorageLocation,
     ) => (
         createAction(ExportActionTypes.EXPORT_DATASET_SUCCESS, {
             instance,
@@ -67,7 +70,7 @@ export const exportActions = {
     closeExportBackupModal: (instance: ProjectOrTaskOrJob) => (
         createAction(ExportActionTypes.CLOSE_EXPORT_BACKUP_MODAL, { instance })
     ),
-    exportBackupSuccess: (instance: Exclude<ProjectOrTaskOrJob, Job> | RequestInstanceType, instanceType: 'task' | 'project', target?: 'local' | 'cloudstorage') => (
+    exportBackupSuccess: (instance: Exclude<ProjectOrTaskOrJob, Job> | RequestInstanceType, instanceType: 'task' | 'project', target?: StorageLocation) => (
         createAction(ExportActionTypes.EXPORT_BACKUP_SUCCESS, { instance, instanceType, target })
     ),
     exportBackupFailed: (instance: Exclude<ProjectOrTaskOrJob, Job> | RequestInstanceType, instanceType: 'task' | 'project', error: any) => (
@@ -75,30 +78,9 @@ export const exportActions = {
     ),
 };

-export async function listenExportDatasetAsync(
-    rqID: string,
-    dispatch: (action: ExportActions | RequestsActions) => void,
-    params: {
-        instance: ProjectOrTaskOrJob | RequestInstanceType,
-        format: string,
-        saveImages: boolean,
-    },
-): Promise<void> {
-    const { instance, format, saveImages } = params;
-    const resource = saveImages ? 'dataset' : 'annotations';
-
-    const instanceType = getInstanceType(instance);
-    try {
-        const result = await listen(rqID, dispatch);
-        const target = !result?.url ? 'cloudstorage' : 'local';
-        dispatch(exportActions.exportDatasetSuccess(
-            instance, instanceType, format, resource, target,
-        ));
-    } catch (error) {
-        dispatch(exportActions.exportDatasetFailed(instance, instanceType, format, resource, error));
-    }
-}
-
+/**
+ * This function is supposed to be used when a new dataset export request is initiated by a user.
+ */
 export const exportDatasetAsync = (
     instance: ProjectOrTaskOrJob,
     format: string,
@@ -106,21 +88,23 @@ export const exportDatasetAsync = (
     useDefaultSettings: boolean,
     targetStorage: Storage,
     name?: string,
-): ThunkAction => async (dispatch, getState) => {
-    const state = getState();
-
+): ThunkAction => async (dispatch) => {
     const resource = saveImages ? 'dataset' : 'annotations';
     const instanceType = getInstanceType(instance);

     try {
         const rqID = await instance.annotations
             .exportDataset(format, saveImages, useDefaultSettings, targetStorage, name);
-        if (shouldListenForProgress(rqID, state.requests)) {
-            await listenExportDatasetAsync(rqID, dispatch, {
-                instance, format, saveImages,
+
+        if (rqID) {
+            await core.requests.listen(rqID, {
+                callback: (updatedRequest) => updateRequestProgress(updatedRequest, dispatch),
             });
-        }
-        if (!rqID) {
+            const target = targetStorage.location;
+            dispatch(exportActions.exportDatasetSuccess(
+                instance, instanceType, format, resource, target,
+            ));
+        } else {
             dispatch(exportActions.exportDatasetSuccess(
                 instance, instanceType, format, resource,
             ));
@@ -130,47 +114,79 @@ export const exportDatasetAsync = (
     }
 };

-export async function listenExportBackupAsync(
+/**
+ * This function is supposed to be used when a new backup export request is initiated by a user.
+ */
+export const exportBackupAsync = (
+    instance: Exclude<ProjectOrTaskOrJob, Job>,
+    targetStorage: Storage,
+    useDefaultSetting: boolean,
+    fileName: string,
+): ThunkAction => async (dispatch) => {
+    const instanceType = getInstanceType(instance) as 'project' | 'task';
+    try {
+        const rqID = await instance.backup(targetStorage, useDefaultSetting, fileName);
+        if (rqID) {
+            await core.requests.listen(rqID, {
+                callback: (updatedRequest) => updateRequestProgress(updatedRequest, dispatch),
+            });
+            const target = targetStorage.location;
+            dispatch(exportActions.exportBackupSuccess(instance, instanceType, target));
+        } else {
+            dispatch(exportActions.exportBackupSuccess(instance, instanceType));
+        }
+    } catch (error) {
+        dispatch(exportActions.exportBackupFailed(instance, instanceType, error as Error));
+    }
+};
+
+/**
+ * This function is supposed to be used when the application starts listening to an existing dataset export request.
+ */
+export async function listenExportDatasetAsync(
     rqID: string,
     dispatch: (action: ExportActions | RequestsActions) => void,
     params: {
-        instance: Exclude<ProjectOrTaskOrJob, Job> | RequestInstanceType,
+        instance: ProjectOrTaskOrJob | RequestInstanceType,
+        format: string,
+        saveImages: boolean,
     },
 ): Promise<void> {
-    const { instance } = params;
-    const instanceType = getInstanceType(instance) as 'project' | 'task';
+    const { instance, format, saveImages } = params;
+    const resource = saveImages ? 'dataset' : 'annotations';
+    const instanceType = getInstanceType(instance);
     try {
         const result = await listen(rqID, dispatch);
-        const target = !result?.url ? 'cloudstorage' : 'local';
-        dispatch(exportActions.exportBackupSuccess(instance, instanceType, target));
+        const target = !result?.url ? StorageLocation.CLOUD_STORAGE : StorageLocation.LOCAL;
+        dispatch(exportActions.exportDatasetSuccess(
+            instance, instanceType, format, resource, target,
+        ));
     } catch (error) {
-        dispatch(exportActions.exportBackupFailed(instance, instanceType, error as Error));
+        dispatch(exportActions.exportDatasetFailed(instance, instanceType, format, resource, error));
     }
 }

-export const exportBackupAsync = (
-    instance: Exclude<ProjectOrTaskOrJob, Job>,
-    targetStorage: Storage,
-    useDefaultSetting: boolean,
-    fileName: string,
-): ThunkAction => async (dispatch, getState) => {
-    const state = getState();
-
+/**
+ * This function is supposed to be used when the application starts listening to an existing backup export request.
+ */
+export async function listenExportBackupAsync(
+    rqID: string,
+    dispatch: (action: ExportActions | RequestsActions) => void,
+    params: {
+        instance: Exclude<ProjectOrTaskOrJob, Job> | RequestInstanceType,
+    },
+): Promise<void> {
+    const { instance } = params;
     const instanceType = getInstanceType(instance) as 'project' | 'task';
     try {
-        const rqID = await instance
-            .backup(targetStorage, useDefaultSetting, fileName);
-        if (shouldListenForProgress(rqID, state.requests)) {
-            await listenExportBackupAsync(rqID, dispatch, { instance });
-        }
-        if (!rqID) {
-            dispatch(exportActions.exportBackupSuccess(instance, instanceType));
-        }
+        const result = await listen(rqID, dispatch);
+        const target = !result?.url ? StorageLocation.CLOUD_STORAGE : StorageLocation.LOCAL;
+        dispatch(exportActions.exportBackupSuccess(instance, instanceType, target));
     } catch (error) {
         dispatch(exportActions.exportBackupFailed(instance, instanceType, error as Error));
     }
-};
+}

 export type ExportActions = ActionUnion<typeof exportActions>;
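Both thunks above follow the same contract: a falsy request ID means the server finished the operation synchronously, otherwise the client listens for progress before reporting success. A schematic sketch of that contract (hypothetical names, Python for consistency with the earlier examples):

```python
# Schematic sketch of the request lifecycle used above; names hypothetical.
def export_and_report(start_export, listen, target_storage, report_success):
    rq_id = start_export()           # e.g. server returns a request ID, or ""

    if rq_id:
        listen(rq_id)                # poll the request until it finishes
        report_success(target=target_storage)
    else:
        report_success(target=None)  # completed synchronously
```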
diff --git a/cvat-ui/src/actions/import-actions.ts b/cvat-ui/src/actions/import-actions.ts
index e47db0b47818..d7e3a548bb3b 100644
--- a/cvat-ui/src/actions/import-actions.ts
+++ b/cvat-ui/src/actions/import-actions.ts
@@ -4,15 +4,14 @@
 // SPDX-License-Identifier: MIT

 import { createAction, ActionUnion, ThunkAction } from 'utils/redux';
-import { CombinedState } from 'reducers';
 import {
     getCore, Storage, Job, Task, Project, ProjectOrTaskOrJob,
 } from 'cvat-core-wrapper';
 import { getProjectsAsync } from './projects-actions';
 import { AnnotationActionTypes, fetchAnnotationsAsync } from './annotation-actions';
 import {
-    getInstanceType, listen, RequestInstanceType, RequestsActions,
-    shouldListenForProgress,
+    getInstanceType, listen, RequestInstanceType,
+    RequestsActions, updateRequestProgress,
 } from './requests-actions';

 const core = getCore();
@@ -69,25 +68,9 @@ export const importActions = {
     ),
 };

-export async function listenImportDatasetAsync(
-    rqID: string,
-    dispatch: (action: ImportActions | RequestsActions) => void,
-    params: {
-        instance: ProjectOrTaskOrJob | RequestInstanceType,
-    },
-): Promise<void> {
-    const { instance } = params;
-
-    const instanceType = getInstanceType(instance);
-    const resource = instanceType === 'project' ? 'dataset' : 'annotation';
-    try {
-        await listen(rqID, dispatch);
-        dispatch(importActions.importDatasetSuccess(instance, resource));
-    } catch (error) {
-        dispatch(importActions.importDatasetFailed(instance, resource, error));
-    }
-}
-
+/**
+ * This function is supposed to be used when a new dataset import request is initiated by a user.
+ */
 export const importDatasetAsync = (
     instance: ProjectOrTaskOrJob,
     format: string,
@@ -100,55 +83,63 @@ export const importDatasetAsync = (
     const instanceType = getInstanceType(instance);
     const resource = instanceType === 'project' ? 'dataset' : 'annotation';

-    try {
-        const state: CombinedState = getState();
+    const listenForImport = (rqID: string) => core.requests.listen(rqID, {
+        callback: (updatedRequest) => updateRequestProgress(updatedRequest, dispatch),
+    });

+    try {
         if (instanceType === 'project') {
             dispatch(importActions.importDataset(instance, format));
-            const rqID = await (instance as Project).annotations
-                .importDataset(format, useDefaultSettings, sourceStorage, file, {
+            const rqID = await (instance as Project).annotations.importDataset(
+                format,
+                useDefaultSettings,
+                sourceStorage,
+                file,
+                {
                     convMaskToPoly,
                     updateStatusCallback: (message: string, progress: number) => (
                         dispatch(importActions.importDatasetUpdateStatus(
                             instance, Math.floor(progress * 100), message,
                         ))
                     ),
-                });
-            if (shouldListenForProgress(rqID, state.requests)) {
-                await listen(rqID, dispatch);
-            }
+                },
+            );
+
+            await listenForImport(rqID);
         } else if (instanceType === 'task') {
             dispatch(importActions.importDataset(instance, format));
-            const rqID = await (instance as Task).annotations
-                .upload(format, useDefaultSettings, sourceStorage, file, {
-                    convMaskToPoly,
-                });
-            if (shouldListenForProgress(rqID, state.requests)) {
-                await listen(rqID, dispatch);
-            }
+            const rqID = await (instance as Task).annotations.upload(
+                format,
+                useDefaultSettings,
+                sourceStorage,
+                file,
+                { convMaskToPoly },
+            );
+            await listenForImport(rqID);
         } else { // job
             dispatch(importActions.importDataset(instance, format));
-            const rqID = await (instance as Job).annotations
-                .upload(format, useDefaultSettings, sourceStorage, file, {
-                    convMaskToPoly,
+            const rqID = await (instance as Job).annotations.upload(
+                format,
+                useDefaultSettings,
+                sourceStorage,
+                file,
+                { convMaskToPoly },
+            );
+
+            await listenForImport(rqID);
+            await (instance as Job).annotations.clear({ reload: true });
+            await (instance as Job).actions.clear();
+
+            // first set empty objects list
+            // to escape some problems in canvas when shape with the same
+            // clientID has different type (polygon, rectangle) for example
+            dispatch({ type: AnnotationActionTypes.UPLOAD_JOB_ANNOTATIONS_SUCCESS });
+
+            const relevantInstance = getState().annotation.job.instance;
+            if (relevantInstance && relevantInstance.id === instance.id) {
+                setTimeout(() => {
+                    dispatch(fetchAnnotationsAsync());
                 });
-            if (shouldListenForProgress(rqID, state.requests)) {
-                await listen(rqID, dispatch);
-
-                await (instance as Job).annotations.clear({ reload: true });
-                await (instance as Job).actions.clear();
-
-                // first set empty objects list
-                // to escape some problems in canvas when shape with the same
-                // clientID has different type (polygon, rectangle) for example
-                dispatch({ type: AnnotationActionTypes.UPLOAD_JOB_ANNOTATIONS_SUCCESS });
-
-                const relevantInstance = getState().annotation.job.instance;
-                if (relevantInstance && relevantInstance.id === instance.id) {
-                    setTimeout(() => {
-                        dispatch(fetchAnnotationsAsync());
-                    });
-                }
+            }
         }
     } catch (error) {
@@ -163,6 +154,28 @@ export const importDatasetAsync = (
     }
 );

+/**
+ * This function is supposed to be used when a new backup import request is initiated by a user.
+ */
+export const importBackupAsync = (instanceType: 'project' | 'task', storage: Storage, file: File | string): ThunkAction => (
+    async (dispatch) => {
+        dispatch(importActions.importBackup());
+        try {
+            const instanceClass = (instanceType === 'task') ? core.classes.Task : core.classes.Project;
+            const rqID = await instanceClass.restore(storage, file);
+            const result = await core.requests.listen(rqID, {
+                callback: (updatedRequest) => updateRequestProgress(updatedRequest, dispatch),
+            });
+            dispatch(importActions.importBackupSuccess(result?.resultID as number, instanceType));
+        } catch (error) {
+            dispatch(importActions.importBackupFailed(instanceType, error));
+        }
+    }
+);
+
+/**
+ * This function is supposed to be used when the application starts listening to an existing backup import request.
+ */
 export async function listenImportBackupAsync(
     rqID: string,
     dispatch: (action: ImportActions | RequestsActions) => void,
@@ -171,32 +184,34 @@ export async function listenImportBackupAsync(
 ): Promise<void> {
     const { instanceType } = params;
-
     try {
         const result = await listen(rqID, dispatch);
-
-        dispatch(importActions.importBackupSuccess(result?.resultID, instanceType));
+        dispatch(importActions.importBackupSuccess(result?.resultID as number, instanceType));
     } catch (error) {
         dispatch(importActions.importBackupFailed(instanceType, error));
     }
 }

-export const importBackupAsync = (instanceType: 'project' | 'task', storage: Storage, file: File | string): ThunkAction => (
-    async (dispatch, getState) => {
-        const state: CombinedState = getState();
-
-        dispatch(importActions.importBackup());
+/**
+ * This function is supposed to be used when the application starts listening to an existing dataset import request.
+ */
+export async function listenImportDatasetAsync(
+    rqID: string,
+    dispatch: (action: ImportActions | RequestsActions) => void,
+    params: {
+        instance: ProjectOrTaskOrJob | RequestInstanceType,
+    },
+): Promise<void> {
+    const { instance } = params;

-        try {
-            const instanceClass = (instanceType === 'task') ? core.classes.Task : core.classes.Project;
-            const rqID = await instanceClass.restore(storage, file);
-            if (shouldListenForProgress(rqID, state.requests)) {
-                await listenImportBackupAsync(rqID, dispatch, { instanceType });
-            }
-        } catch (error) {
-            dispatch(importActions.importBackupFailed(instanceType, error));
-        }
+    const instanceType = getInstanceType(instance);
'dataset' : 'annotation'; + try { + await listen(rqID, dispatch); + dispatch(importActions.importDatasetSuccess(instance, resource)); + } catch (error) { + dispatch(importActions.importDatasetFailed(instance, resource, error)); } -); +} export type ImportActions = ActionUnion; diff --git a/cvat-ui/src/actions/jobs-actions.ts b/cvat-ui/src/actions/jobs-actions.ts index 7c2df71a9270..e7d13e23b7f1 100644 --- a/cvat-ui/src/actions/jobs-actions.ts +++ b/cvat-ui/src/actions/jobs-actions.ts @@ -1,5 +1,5 @@ // Copyright (C) 2022 Intel Corporation -// Copyright (C) 2023 CVAT.ai Corporation +// Copyright (C) 2023-2024 CVAT.ai Corporation // // SPDX-License-Identifier: MIT @@ -34,7 +34,9 @@ interface JobsList extends Array { } const jobsActions = { - getJobs: (query: Partial) => createAction(JobsActionTypes.GET_JOBS, { query }), + getJobs: (query: Partial, fetchingTimestamp: number) => ( + createAction(JobsActionTypes.GET_JOBS, { query, fetchingTimestamp }) + ), getJobsSuccess: (jobs: JobsList) => ( createAction(JobsActionTypes.GET_JOBS_SUCCESS, { jobs }) ), @@ -73,16 +75,26 @@ const jobsActions = { export type JobsActions = ActionUnion; -export const getJobsAsync = (query: JobsQuery): ThunkAction => async (dispatch) => { +export const getJobsAsync = (query: JobsQuery): ThunkAction => async (dispatch, getState) => { + const requestedOn = Date.now(); + const isRequestRelevant = (): boolean => ( + getState().jobs.fetchingTimestamp === requestedOn + ); + try { // We remove all keys with null values from the query const filteredQuery = filterNull(query); - dispatch(jobsActions.getJobs(filteredQuery as JobsQuery)); + dispatch(jobsActions.getJobs(filteredQuery as JobsQuery, requestedOn)); const jobs = await cvat.jobs.get(filteredQuery); - dispatch(jobsActions.getJobsSuccess(jobs)); + + if (isRequestRelevant()) { + dispatch(jobsActions.getJobsSuccess(jobs)); + } } catch (error) { - dispatch(jobsActions.getJobsFailed(error)); + if (isRequestRelevant()) { + dispatch(jobsActions.getJobsFailed(error)); + } } }; @@ -96,10 +108,20 @@ export const getJobPreviewAsync = (job: Job): ThunkAction => async (dispatch) => } }; -export const createJobAsync = (data: JobData): ThunkAction => async (dispatch) => { - const jobInstance = new cvat.classes.Job(data); +export const createJobAsync = (data: JobData): ThunkAction> => async (dispatch) => { + const initialData = { + type: data.type, + task_id: data.taskID, + }; + const jobInstance = new cvat.classes.Job(initialData); try { - const savedJob = await jobInstance.save(data); + const extras = { + frame_selection_method: data.frameSelectionMethod, + seed: data.seed, + frame_count: data.frameCount, + frames_per_job_count: data.framesPerJobCount, + }; + const savedJob = await jobInstance.save(extras); return savedJob; } catch (error) { dispatch(jobsActions.createJobFailed(error)); diff --git a/cvat-ui/src/actions/projects-actions.ts b/cvat-ui/src/actions/projects-actions.ts index 6ab08543caf4..7dab31145895 100644 --- a/cvat-ui/src/actions/projects-actions.ts +++ b/cvat-ui/src/actions/projects-actions.ts @@ -33,7 +33,7 @@ export enum ProjectsActionTypes { } const projectActions = { - getProjects: () => createAction(ProjectsActionTypes.GET_PROJECTS), + getProjects: (fetchingTimestamp: number) => createAction(ProjectsActionTypes.GET_PROJECTS, { fetchingTimestamp }), getProjectsSuccess: (array: any[], count: number) => ( createAction(ProjectsActionTypes.GET_PROJECTS_SUCCESS, { array, count }) ), @@ -86,8 +86,13 @@ export function getProjectTasksAsync(tasksQuery: Partial = {}): 
Thun export function getProjectsAsync( query: Partial, tasksQuery: Partial = {}, ): ThunkAction { - return async (dispatch: ThunkDispatch): Promise => { - dispatch(projectActions.getProjects()); + return async (dispatch: ThunkDispatch, getState): Promise => { + const requestedOn = Date.now(); + const isRequestRelevant = (): boolean => ( + getState().projects.fetchingTimestamp === requestedOn + ); + + dispatch(projectActions.getProjects(requestedOn)); dispatch(projectActions.updateProjectsGettingQuery(query, tasksQuery)); // Clear query object from null fields @@ -100,20 +105,22 @@ export function getProjectsAsync( try { result = await cvat.projects.get(filteredQuery); } catch (error) { - dispatch(projectActions.getProjectsFailed(error)); + if (isRequestRelevant()) { + dispatch(projectActions.getProjectsFailed(error)); + } return; } - const array = Array.from(result); - - dispatch(projectActions.getProjectsSuccess(array, result.count)); - - // Appropriate tasks fetching process needs with retrieving only a single project - if (Object.keys(filteredQuery).includes('id') && typeof filteredQuery.id === 'number') { - dispatch(getProjectTasksAsync({ - ...tasksQuery, - projectId: filteredQuery.id, - })); + if (isRequestRelevant()) { + const array = Array.from(result); + dispatch(projectActions.getProjectsSuccess(array, result.count)); + // Appropriate tasks fetching process needs with retrieving only a single project + if (Object.keys(filteredQuery).includes('id') && typeof filteredQuery.id === 'number') { + dispatch(getProjectTasksAsync({ + ...tasksQuery, + projectId: filteredQuery.id, + })); + } } }; } diff --git a/cvat-ui/src/actions/requests-actions.ts b/cvat-ui/src/actions/requests-actions.ts index 6a62e7cecf7c..f0e2d2f6adc1 100644 --- a/cvat-ui/src/actions/requests-actions.ts +++ b/cvat-ui/src/actions/requests-actions.ts @@ -3,12 +3,19 @@ // SPDX-License-Identifier: MIT import { ActionUnion, createAction } from 'utils/redux'; -import { RequestsQuery, RequestsState } from 'reducers'; -import { - Request, ProjectOrTaskOrJob, getCore, RQStatus, -} from 'cvat-core-wrapper'; +import { CombinedState, RequestsQuery } from 'reducers'; +import { Request, ProjectOrTaskOrJob, getCore } from 'cvat-core-wrapper'; +import { Store } from 'redux'; +import { getCVATStore } from 'cvat-store'; const core = getCore(); +let store: null | Store = null; +function getStore(): Store { + if (store === null) { + store = getCVATStore(); + } + return store; +} export enum RequestsActionsTypes { GET_REQUESTS = 'GET_REQUESTS', @@ -79,23 +86,15 @@ export function updateRequestProgress(request: Request, dispatch: (action: Reque ); } -export function shouldListenForProgress(rqID: string | undefined, state: RequestsState): boolean { - return ( - typeof rqID === 'string' && - (!state.requests[rqID] || [RQStatus.FINISHED, RQStatus.FAILED].includes(state.requests[rqID]?.status)) - ); -} - export function listen( requestID: string, dispatch: (action: RequestsActions) => void, - initialRequest?: Request, ) : Promise { - return core.requests - .listen(requestID, { - callback: (updatedRequest) => { - updateRequestProgress(updatedRequest, dispatch); - }, - initialRequest, - }); + const { requests } = getStore().getState().requests; + return core.requests.listen(requestID, { + callback: (updatedRequest) => { + updateRequestProgress(updatedRequest, dispatch); + }, + initialRequest: requests[requestID], + }); } diff --git a/cvat-ui/src/actions/requests-async-actions.ts b/cvat-ui/src/actions/requests-async-actions.ts index 
04a5ffd0a5c5..86151cbd076e 100644 --- a/cvat-ui/src/actions/requests-async-actions.ts +++ b/cvat-ui/src/actions/requests-async-actions.ts @@ -8,7 +8,9 @@ import { getCore, RQStatus, Request, Project, Task, Job, } from 'cvat-core-wrapper'; import { listenExportBackupAsync, listenExportDatasetAsync } from './export-actions'; -import { RequestInstanceType, listen, requestsActions } from './requests-actions'; +import { + RequestInstanceType, listen, requestsActions, +} from './requests-actions'; import { listenImportBackupAsync, listenImportDatasetAsync } from './import-actions'; const core = getCore(); @@ -28,18 +30,21 @@ export function getRequestsAsync(query: RequestsQuery): ThunkAction { try { const requests = await core.requests.list(); + dispatch(requestsActions.getRequestsSuccess(requests)); requests .filter((request: Request) => [RQStatus.STARTED, RQStatus.QUEUED].includes(request.status)) .forEach((request: Request): void => { const { id: rqID, + status, operation: { type, target, format, taskID, projectID, jobID, }, } = request; - if (state.requests.requests[rqID]) { + const isRequestFinished = [RQStatus.FINISHED, RQStatus.FAILED].includes(status); + if (state.requests.requests[rqID] || isRequestFinished) { return; } @@ -80,7 +85,6 @@ export function getRequestsAsync(query: RequestsQuery): ThunkAction { } } }); - dispatch(requestsActions.getRequestsSuccess(requests)); } catch (error) { dispatch(requestsActions.getRequestsFailed(error)); } diff --git a/cvat-ui/src/actions/tasks-actions.ts b/cvat-ui/src/actions/tasks-actions.ts index 60e4da022ef2..644f5aa7b021 100644 --- a/cvat-ui/src/actions/tasks-actions.ts +++ b/cvat-ui/src/actions/tasks-actions.ts @@ -6,7 +6,7 @@ import { AnyAction } from 'redux'; import { TasksQuery, StorageLocation } from 'reducers'; import { - getCore, RQStatus, Storage, Task, + getCore, RQStatus, Storage, Task, UpdateStatusData, Request, } from 'cvat-core-wrapper'; import { filterNull } from 'utils/filter-null'; import { ThunkDispatch, ThunkAction } from 'utils/redux'; @@ -32,10 +32,11 @@ export enum TasksActionTypes { UPDATE_TASK_IN_STATE = 'UPDATE_TASK_IN_STATE', } -function getTasks(query: Partial, updateQuery: boolean): AnyAction { +function getTasks(query: Partial, updateQuery: boolean, fetchingTimestamp: number): AnyAction { const action = { type: TasksActionTypes.GET_TASKS, payload: { + fetchingTimestamp, updateQuery, query, }, @@ -69,23 +70,30 @@ export function getTasksAsync( query: Partial, updateQuery = true, ): ThunkAction { - return async (dispatch: ThunkDispatch): Promise => { - dispatch(getTasks(query, updateQuery)); + return async (dispatch: ThunkDispatch, getState): Promise => { + const requestedOn = Date.now(); + const isRequestRelevant = (): boolean => ( + getState().tasks.fetchingTimestamp === requestedOn + ); + dispatch(getTasks(query, updateQuery, requestedOn)); const filteredQuery = filterNull(query); let result = null; try { result = await cvat.tasks.get(filteredQuery); } catch (error) { - dispatch(getTasksFailed(error)); + if (isRequestRelevant()) { + dispatch(getTasksFailed(error)); + } return; } - const array = Array.from(result); - - dispatch(getInferenceStatusAsync()); - dispatch(getTasksSuccess(array, result.count)); + if (isRequestRelevant()) { + const array = Array.from(result); + dispatch(getInferenceStatusAsync()); + dispatch(getTasksSuccess(array, result.count)); + } }; } @@ -274,10 +282,10 @@ ThunkAction { taskInstance.remoteFiles = data.files.remote; try { const savedTask = await taskInstance.save(extras, { - 
requestStatusCallback(request) { - let { message } = request; + updateStatusCallback(updateData: Request | UpdateStatusData) { + let { message } = updateData; + const { status, progress } = updateData; let helperMessage = ''; - const { status, progress } = request; if (!message) { if ([RQStatus.QUEUED, RQStatus.STARTED].includes(status)) { message = 'CVAT queued the task to import'; @@ -291,7 +299,7 @@ ThunkAction { } } onProgress?.(`${message} ${progress ? `${Math.floor(progress * 100)}%` : ''}. ${helperMessage}`); - if (request.id) updateRequestProgress(request, dispatch); + if (updateData instanceof Request) updateRequestProgress(updateData, dispatch); }, }); diff --git a/cvat-ui/src/components/annotation-page/annotation-page.tsx b/cvat-ui/src/components/annotation-page/annotation-page.tsx index 37ba42116711..a04734b9c371 100644 --- a/cvat-ui/src/components/annotation-page/annotation-page.tsx +++ b/cvat-ui/src/components/annotation-page/annotation-page.tsx @@ -5,7 +5,6 @@ import React, { useEffect } from 'react'; import Layout from 'antd/lib/layout'; -import Result from 'antd/lib/result'; import Spin from 'antd/lib/spin'; import notification from 'antd/lib/notification'; import Button from 'antd/lib/button'; @@ -19,6 +18,7 @@ import StandardWorkspaceComponent from 'components/annotation-page/standard-work import StandardWorkspace3DComponent from 'components/annotation-page/standard3D-workspace/standard3D-workspace'; import TagAnnotationWorkspace from 'components/annotation-page/tag-annotation-workspace/tag-annotation-workspace'; import FiltersModalComponent from 'components/annotation-page/top-bar/filters-modal'; +import { JobNotFoundComponent } from 'components/common/not-found'; import StatisticsModalComponent from 'components/annotation-page/top-bar/statistics-modal'; import AnnotationTopBarContainer from 'containers/annotation-page/top-bar/top-bar'; import { Workspace } from 'reducers'; @@ -139,14 +139,7 @@ export default function AnnotationPageComponent(props: Props): JSX.Element { } if (typeof job === 'undefined') { - return ( - - ); + return ; } return ( diff --git a/cvat-ui/src/components/annotation-page/annotations-actions/annotations-actions-modal.tsx b/cvat-ui/src/components/annotation-page/annotations-actions/annotations-actions-modal.tsx index 27898da9fa2a..509dd42b9b35 100644 --- a/cvat-ui/src/components/annotation-page/annotations-actions/annotations-actions-modal.tsx +++ b/cvat-ui/src/components/annotation-page/annotations-actions/annotations-actions-modal.tsx @@ -4,9 +4,12 @@ import './styles.scss'; -import React, { - useEffect, useReducer, useRef, useState, -} from 'react'; +import React, { useEffect, useRef, useState } from 'react'; +import { createStore } from 'redux'; +import { + Provider, shallowEqual, useDispatch, useSelector, +} from 'react-redux'; +import { createRoot } from 'react-dom/client'; import Button from 'antd/lib/button'; import { Col, Row } from 'antd/lib/grid'; import Progress from 'antd/lib/progress'; @@ -16,56 +19,59 @@ import Text from 'antd/lib/typography/Text'; import Modal from 'antd/lib/modal'; import Alert from 'antd/lib/alert'; import InputNumber from 'antd/lib/input-number'; +import Switch from 'antd/lib/switch'; import config from 'config'; -import { useIsMounted } from 'utils/hooks'; import { createAction, ActionUnion } from 'utils/redux'; import { getCVATStore } from 'cvat-store'; import { - BaseSingleFrameAction, FrameSelectionType, Job, getCore, + BaseCollectionAction, BaseAction, Job, getCore, + ObjectState, ActionParameterType, } 
from 'cvat-core-wrapper'; import { Canvas } from 'cvat-canvas-wrapper'; -import { fetchAnnotationsAsync, saveAnnotationsAsync } from 'actions/annotation-actions'; -import { switchAutoSave } from 'actions/settings-actions'; +import { fetchAnnotationsAsync } from 'actions/annotation-actions'; import { clamp } from 'utils/math'; const core = getCore(); interface State { - actions: BaseSingleFrameAction[]; - activeAction: BaseSingleFrameAction | null; + actions: BaseAction[]; + activeAction: BaseAction | null; + initialized: boolean; fetching: boolean; progress: number | null; progressMessage: string | null; cancelled: boolean; - autoSaveEnabled: boolean; - jobHasBeenSaved: boolean; frameFrom: number; frameTo: number; - actionParameters: Record; + actionParameters: Record>; modalVisible: boolean; + targetObjectState?: ObjectState | null; } enum ReducerActionType { + SET_INITIALIZED = 'SET_INITIALIZED', SET_ANNOTATIONS_ACTIONS = 'SET_ANNOTATIONS_ACTIONS', SET_ACTIVE_ANNOTATIONS_ACTION = 'SET_ACTIVE_ANNOTATIONS_ACTION', UPDATE_PROGRESS = 'UPDATE_PROGRESS', RESET_BEFORE_RUN = 'RESET_BEFORE_RUN', RESET_AFTER_RUN = 'RESET_AFTER_RUN', CANCEL_ACTION = 'CANCEL_ACTION', - SET_AUTOSAVE_DISABLED_FLAG = 'SET_AUTOSAVE_DISABLED_FLAG', - SET_JOB_WAS_SAVED_FLAG = 'SET_JOB_WAS_SAVED_FLAG', UPDATE_FRAME_FROM = 'UPDATE_FRAME_FROM', UPDATE_FRAME_TO = 'UPDATE_FRAME_TO', + UPDATE_TARGET_OBJECT_STATE = 'UPDATE_TARGET_OBJECT_STATE', UPDATE_ACTION_PARAMETER = 'UPDATE_ACTION_PARAMETER', SET_VISIBLE = 'SET_VISIBLE', } export const reducerActions = { - setAnnotationsActions: (actions: BaseSingleFrameAction[]) => ( + setInitialized: (initialized: boolean) => ( + createAction(ReducerActionType.SET_INITIALIZED, { initialized }) + ), + setAnnotationsActions: (actions: BaseAction[]) => ( createAction(ReducerActionType.SET_ANNOTATIONS_ACTIONS, { actions }) ), - setActiveAnnotationsAction: (activeAction: BaseSingleFrameAction) => ( + setActiveAnnotationsAction: (activeAction: BaseAction) => ( createAction(ReducerActionType.SET_ACTIVE_ANNOTATIONS_ACTION, { activeAction }) ), updateProgress: (progress: number | null, progressMessage: string | null) => ( @@ -80,18 +86,15 @@ export const reducerActions = { cancelAction: () => ( createAction(ReducerActionType.CANCEL_ACTION) ), - setAutoSaveDisabledFlag: () => ( - createAction(ReducerActionType.SET_AUTOSAVE_DISABLED_FLAG) - ), - setJobSavedFlag: (jobHasBeenSaved: boolean) => ( - createAction(ReducerActionType.SET_JOB_WAS_SAVED_FLAG, { jobHasBeenSaved }) - ), updateFrameFrom: (frameFrom: number) => ( createAction(ReducerActionType.UPDATE_FRAME_FROM, { frameFrom }) ), updateFrameTo: (frameTo: number) => ( createAction(ReducerActionType.UPDATE_FRAME_TO, { frameTo }) ), + updateTargetObjectState: (targetObjectState: ObjectState | null) => ( + createAction(ReducerActionType.UPDATE_TARGET_OBJECT_STATE, { targetObjectState }) + ), updateActionParameter: (name: string, value: string) => ( createAction(ReducerActionType.UPDATE_ACTION_PARAMETER, { name, value }) ), @@ -100,34 +103,54 @@ export const reducerActions = { ), }; -const reducer = (state: State, action: ActionUnion): State => { +const defaultState = { + actions: [], + initialized: false, + fetching: false, + activeAction: null, + progress: null, + progressMessage: null, + cancelled: false, + frameFrom: 0, + frameTo: 0, + actionParameters: {}, + modalVisible: true, + targetObjectState: null, +}; + +const reducer = (state: State = { ...defaultState }, action: ActionUnion): State => { + if (action.type === 
ReducerActionType.SET_INITIALIZED) { + return { + ...state, + initialized: action.payload.initialized, + }; + } + if (action.type === ReducerActionType.SET_ANNOTATIONS_ACTIONS) { + const { actions } = action.payload; + const { targetObjectState } = state; + + const filteredActions = targetObjectState ? actions + .filter((_action) => _action.isApplicableForObject(targetObjectState)) : actions; return { ...state, - actions: action.payload.actions, - activeAction: action.payload.actions[0] || null, - actionParameters: {}, + actions, + activeAction: filteredActions[0] ?? null, }; } if (action.type === ReducerActionType.SET_ACTIVE_ANNOTATIONS_ACTION) { - const { frameSelection } = action.payload.activeAction; - if (frameSelection === FrameSelectionType.CURRENT_FRAME) { - const storage = getCVATStore(); - const currentFrame = storage.getState().annotation.player.frame.number; + const { activeAction } = action.payload; + const { targetObjectState } = state; + + if (!targetObjectState || activeAction.isApplicableForObject(targetObjectState)) { return { ...state, - frameFrom: currentFrame, - frameTo: currentFrame, - activeAction: action.payload.activeAction, - actionParameters: {}, + activeAction, }; } - return { - ...state, - activeAction: action.payload.activeAction, - actionParameters: {}, - }; + + return state; } if (action.type === ReducerActionType.UPDATE_PROGRESS) { @@ -163,20 +186,6 @@ const reducer = (state: State, action: ActionUnion): Stat }; } - if (action.type === ReducerActionType.SET_AUTOSAVE_DISABLED_FLAG) { - return { - ...state, - autoSaveEnabled: false, - }; - } - - if (action.type === ReducerActionType.SET_JOB_WAS_SAVED_FLAG) { - return { - ...state, - jobHasBeenSaved: action.payload.jobHasBeenSaved, - }; - } - if (action.type === ReducerActionType.UPDATE_FRAME_FROM) { return { ...state, @@ -194,11 +203,15 @@ const reducer = (state: State, action: ActionUnion): Stat } if (action.type === ReducerActionType.UPDATE_ACTION_PARAMETER) { + const currentActionName = (state.activeAction as BaseAction).name; return { ...state, actionParameters: { ...state.actionParameters, - [action.payload.name]: action.payload.value, + [currentActionName]: { + ...state.actionParameters[currentActionName] ?? {}, + [action.payload.name]: action.payload.value, + }, }, }; } @@ -210,12 +223,43 @@ const reducer = (state: State, action: ActionUnion): Stat }; } + if (action.type === ReducerActionType.UPDATE_TARGET_OBJECT_STATE) { + const { targetObjectState } = action.payload; + let { activeAction } = state; + + if (activeAction && targetObjectState && !activeAction.isApplicableForObject(targetObjectState)) { + const filtered = state.actions.filter((_action) => _action.isApplicableForObject(targetObjectState)); + activeAction = filtered[0] ?? 
null; + } + + return { + ...state, + activeAction, + targetObjectState: action.payload.targetObjectState, + }; + } + return state; }; -type Props = NonNullable[keyof BaseSingleFrameAction['parameters']]; - -function ActionParameterComponent(props: Props & { onChange: (value: string) => void }): JSX.Element { +type ActionParameterProps = NonNullable[keyof BaseAction['parameters']]; + +const componentStorage = createStore(reducer, { + actions: [], + initialized: false, + fetching: false, + activeAction: null, + progress: null, + progressMessage: null, + cancelled: false, + frameFrom: 0, + frameTo: 0, + actionParameters: {}, + modalVisible: true, + targetObjectState: null, +}); + +function ActionParameterComponent(props: ActionParameterProps & { onChange: (value: string) => void }): JSX.Element { const { defaultValue, type, values, onChange, } = props; @@ -230,7 +274,7 @@ function ActionParameterComponent(props: Props & { onChange: (value: string) => const computedValues = typeof values === 'function' ? values({ instance: job }) : values; - if (type === 'select') { + if (type === ActionParameterType.SELECT) { return ( { - const newActiveAction = actions + const newActiveAction = filteredActions .find((action) => action.name === newActiveActionName); if (newActiveAction) { dispatch(reducerActions.setActiveAnnotationsAction(newActiveAction)); } }} > - {actions.map( - (annotationFunction: BaseSingleFrameAction): JSX.Element => ( + {filteredActions.map( + (annotationFunction: BaseAction): JSX.Element => ( void; }): JSX.El - {activeAction ? ( + {activeAction && !currentFrameAction ? ( <> - 2. Specify frames to apply the action + Specify frames to apply the action
- { - currentFrameAction ? ( - Running the action is only allowed on current frame - ) : ( - <> - Starting from frame - { - if (typeof value === 'number') { - dispatch(reducerActions.updateFrameFrom( - clamp( - Math.round(value), - jobInstance.startFrame, - frameTo, - ), - )); - } - }} - /> - up to frame - { - if (typeof value === 'number') { - dispatch(reducerActions.updateFrameTo( - clamp( - Math.round(value), - frameFrom, - jobInstance.stopFrame, - ), - )); - } - }} - /> - - - ) - } + Starting from frame + { + if (typeof value === 'number') { + dispatch(reducerActions.updateFrameFrom( + clamp( + Math.round(value), + jobInstance.startFrame, + frameTo, + ), + )); + } + }} + /> + up to frame + { + if (typeof value === 'number') { + dispatch(reducerActions.updateFrameTo( + clamp( + Math.round(value), + frameFrom, + jobInstance.stopFrame, + ), + )); + } + }} + />
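The two handlers above keep the chosen range consistent: each input clamps its value against the job boundaries and against the opposite bound, so the lower bound can never pass the upper one. A minimal standalone sketch of that invariant; the clamp helper stands in for the one imported from utils/math, and JobLike/FrameRange are reduced illustrative types, not the real Job class:

```ts
// Stand-in for the clamp imported from utils/math in the diff above.
function clamp(value: number, min: number, max: number): number {
    return Math.min(Math.max(value, min), max);
}

interface JobLike {
    startFrame: number;
    stopFrame: number;
}

interface FrameRange {
    frameFrom: number;
    frameTo: number;
}

// The lower bound is clamped to [startFrame, frameTo] and the upper bound
// to [frameFrom, stopFrame], so frameFrom <= frameTo holds after any update.
function updateFrameFrom(job: JobLike, range: FrameRange, value: number): FrameRange {
    return { ...range, frameFrom: clamp(Math.round(value), job.startFrame, range.frameTo) };
}

function updateFrameTo(job: JobLike, range: FrameRange, value: number): FrameRange {
    return { ...range, frameTo: clamp(Math.round(value), range.frameFrom, job.stopFrame) };
}

// Example: with a job spanning frames 0..99 and a range of [10, 20],
// moving the lower bound to 35 pins it at 20 instead.
const next = updateFrameFrom({ startFrame: 0, stopFrame: 99 }, { frameFrom: 10, frameTo: 20 }, 35);
console.log(next); // { frameFrom: 20, frameTo: 20 }
console.log(updateFrameTo({ startFrame: 0, stopFrame: 99 }, next, 150)); // { frameFrom: 20, frameTo: 99 }
```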
@@ -534,18 +526,22 @@ function AnnotationsActionsModalContent(props: { onClose: () => void; }): JSX.El - 3. Setup action parameters + Setup action parameters
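Note how parameter values are resolved below: actionParameters is now keyed by action name (see the UPDATE_ACTION_PARAMETER reducer case earlier in this file's diff), so each action remembers its own values, and defaultValue only applies when nothing was stored yet. A reduced sketch of the update and lookup, with simplified types and an illustrative action name:

```ts
// Parameters stored per action name, so values typed for one action
// survive switching the modal to another action.
type ActionParameters = Record<string, Record<string, string>>;

function updateActionParameter(
    stored: ActionParameters,
    actionName: string,
    name: string,
    value: string,
): ActionParameters {
    return {
        ...stored,
        [actionName]: {
            ...(stored[actionName] ?? {}),
            [name]: value,
        },
    };
}

// Mirrors the `?? defaultValue` fallback in the component above: the declared
// default is used only when this parameter was never edited for this action.
function currentValue(
    stored: ActionParameters,
    actionName: string,
    name: string,
    defaultValue: string,
): string {
    return stored[actionName]?.[name] ?? defaultValue;
}

let stored: ActionParameters = {};
stored = updateActionParameter(stored, 'Some action', 'threshold', '0.5'); // hypothetical action/parameter names
console.log(currentValue(stored, 'Some action', 'threshold', '0.1')); // '0.5'
console.log(currentValue(stored, 'Another action', 'threshold', '0.1')); // '0.1'
```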
{Object.entries(activeAction.parameters) .map(([name, { defaultValue, type, values }], idx) => ( - + {name} { dispatch(reducerActions.updateActionParameter(name, value)); }} - defaultValue={defaultValue} + defaultValue={actionParameters[activeAction.name]?.[name] ?? defaultValue} type={type} values={values} /> @@ -568,7 +564,7 @@ function AnnotationsActionsModalContent(props: { onClose: () => void; }): JSX.El + + ); +} + export default function ItemMenu(props: Props): MenuProps { const { readonly, shapeType, objectType, colorBy, jobInstance, @@ -249,6 +269,7 @@ export default function ItemMenu(props: Props): MenuProps { REMOVE_ITEM = 'remove_item', EDIT_MASK = 'edit_mask', SLICE_ITEM = 'slice_item', + RUN_ANNOTATION_ACTION = 'run_annotation_action', } const is2D = jobInstance.dimension === DimensionType.DIMENSION_2D; @@ -326,6 +347,13 @@ export default function ItemMenu(props: Props): MenuProps { }); } + if (!readonly) { + items.push({ + key: MenuKeys.RUN_ANNOTATION_ACTION, + label: , + }); + } + return { items, selectable: false, diff --git a/cvat-ui/src/components/annotation-page/standard-workspace/objects-side-bar/object-item.tsx b/cvat-ui/src/components/annotation-page/standard-workspace/objects-side-bar/object-item.tsx index 9e6ff1f609c2..7ae46a7a71a3 100644 --- a/cvat-ui/src/components/annotation-page/standard-workspace/objects-side-bar/object-item.tsx +++ b/cvat-ui/src/components/annotation-page/standard-workspace/objects-side-bar/object-item.tsx @@ -41,6 +41,7 @@ interface Props { changeLabel(label: any): void; changeColor(color: string): void; resetCuboidPerspective(): void; + runAnnotationAction(): void; edit(): void; slice(): void; } @@ -73,6 +74,7 @@ function ObjectItemComponent(props: Props): JSX.Element { changeLabel, changeColor, resetCuboidPerspective, + runAnnotationAction, edit, slice, jobInstance, @@ -118,9 +120,10 @@ function ObjectItemComponent(props: Props): JSX.Element { propagateShortcut={normalizedKeyMap.PROPAGATE_OBJECT} toBackgroundShortcut={normalizedKeyMap.TO_BACKGROUND} toForegroundShortcut={normalizedKeyMap.TO_FOREGROUND} - removeShortcut={normalizedKeyMap.DELETE_OBJECT} + removeShortcut={normalizedKeyMap.DELETE_OBJECT_STANDARD_WORKSPACE} changeColorShortcut={normalizedKeyMap.CHANGE_OBJECT_COLOR} sliceShortcut={normalizedKeyMap.SWITCH_SLICE_MODE} + runAnnotationsActionShortcut={normalizedKeyMap.RUN_ANNOTATIONS_ACTION} changeLabel={changeLabel} changeColor={changeColor} copy={copy} @@ -133,6 +136,7 @@ function ObjectItemComponent(props: Props): JSX.Element { resetCuboidPerspective={resetCuboidPerspective} edit={edit} slice={slice} + runAnnotationAction={runAnnotationAction} /> {!!attributes.length && ( diff --git a/cvat-ui/src/components/annotation-page/standard-workspace/objects-side-bar/styles.scss b/cvat-ui/src/components/annotation-page/standard-workspace/objects-side-bar/styles.scss index b573bb61618d..b91cc68bd8d1 100644 --- a/cvat-ui/src/components/annotation-page/standard-workspace/objects-side-bar/styles.scss +++ b/cvat-ui/src/components/annotation-page/standard-workspace/objects-side-bar/styles.scss @@ -402,7 +402,10 @@ } button { - color: $text-color; + &:not(:disabled) { + color: $text-color; + } + width: 100%; height: 100%; text-align: left; diff --git a/cvat-ui/src/components/annotation-page/standard-workspace/propagate-confirm.tsx b/cvat-ui/src/components/annotation-page/standard-workspace/propagate-confirm.tsx index 511d4454bce2..4d57ed9faf05 100644 --- a/cvat-ui/src/components/annotation-page/standard-workspace/propagate-confirm.tsx +++ 
b/cvat-ui/src/components/annotation-page/standard-workspace/propagate-confirm.tsx @@ -4,7 +4,7 @@ // SPDX-License-Identifier: MIT import React, { useEffect, useState } from 'react'; -import { useDispatch, useSelector } from 'react-redux'; +import { shallowEqual, useDispatch, useSelector } from 'react-redux'; import Modal from 'antd/lib/modal'; import InputNumber from 'antd/lib/input-number'; import Text from 'antd/lib/typography/Text'; @@ -23,12 +23,19 @@ export enum PropagateDirection { function PropagateConfirmComponent(): JSX.Element { const dispatch = useDispatch(); - const visible = useSelector((state: CombinedState) => state.annotation.propagate.visible); - const frameNumber = useSelector((state: CombinedState) => state.annotation.player.frame.number); - const startFrame = useSelector((state: CombinedState) => state.annotation.job.instance.startFrame); - const stopFrame = useSelector((state: CombinedState) => state.annotation.job.instance.stopFrame); - const [targetFrame, setTargetFrame] = useState(frameNumber); + const { + visible, + frameNumber, + frameNumbers, + } = useSelector((state: CombinedState) => ({ + visible: state.annotation.propagate.visible, + frameNumber: state.annotation.player.frame.number, + frameNumbers: state.annotation.job.frameNumbers, + }), shallowEqual); + const [targetFrame, setTargetFrame] = useState(frameNumber); + const startFrame = frameNumbers[0]; + const stopFrame = frameNumbers[frameNumbers.length - 1]; const propagateFrames = Math.abs(targetFrame - frameNumber); const propagateDirection = targetFrame >= frameNumber ? PropagateDirection.FORWARD : PropagateDirection.BACKWARD; @@ -93,9 +100,9 @@ function PropagateConfirmComponent(): JSX.Element { size='small' min={0} value={propagateFrames} - onChange={(value: number) => { - if (typeof value !== 'undefined') { - updateTargetFrame(propagateDirection, +value); + onChange={(value: number | null) => { + if (typeof value === 'number') { + updateTargetFrame(propagateDirection, value); } }} /> @@ -115,7 +122,7 @@ function PropagateConfirmComponent(): JSX.Element { [frameNumber]: 'FROM', [targetFrame]: 'TO', } : undefined} - onChange={([value1, value2]: [number, number]) => { + onChange={([value1, value2]: number[]) => { const value = value1 === frameNumber || value1 === targetFrame ? 
value2 : value1; if (value < frameNumber) { setTargetFrame(clamp(value, startFrame, frameNumber)); @@ -133,8 +140,8 @@ function PropagateConfirmComponent(): JSX.Element { min={startFrame} max={stopFrame} value={targetFrame} - onChange={(value: number) => { - if (typeof value !== 'undefined') { + onChange={(value: number | null) => { + if (typeof value === 'number') { if (value > frameNumber) { setTargetFrame(clamp(+value, frameNumber, stopFrame)); } else if (value < frameNumber) { diff --git a/cvat-ui/src/components/annotation-page/tag-annotation-workspace/frame-tags.tsx b/cvat-ui/src/components/annotation-page/tag-annotation-workspace/frame-tags.tsx index 145ec6c1c9b5..4c286122ceb1 100644 --- a/cvat-ui/src/components/annotation-page/tag-annotation-workspace/frame-tags.tsx +++ b/cvat-ui/src/components/annotation-page/tag-annotation-workspace/frame-tags.tsx @@ -1,64 +1,34 @@ // Copyright (C) 2022 Intel Corporation +// Copyright (C) 2024 CVAT.ai Corporation // // SPDX-License-Identifier: MIT import './styles.scss'; -import React, { useState, useEffect } from 'react'; +import React, { useState, useEffect, useCallback } from 'react'; import Tag from 'antd/lib/tag'; -import { connect } from 'react-redux'; -import { Action } from 'redux'; -import { ThunkDispatch } from 'redux-thunk'; +import { shallowEqual, useDispatch, useSelector } from 'react-redux'; import { removeObject as removeObjectAction, } from 'actions/annotation-actions'; -import { CombinedState, ObjectType, Workspace } from 'reducers'; -import { - QualityConflict, ObjectState, AnnotationConflict, getCore, -} from 'cvat-core-wrapper'; +import { CombinedState, ObjectType } from 'reducers'; +import { ObjectState, AnnotationConflict } from 'cvat-core-wrapper'; import { filterAnnotations } from 'utils/filter-annotations'; -const core = getCore(); - -interface StateToProps { - highlightedConflict: QualityConflict | null; - states: ObjectState[]; - workspace: Workspace; -} - -interface DispatchToProps { - removeObject(objectState: any): void; -} - -function mapStateToProps(state: CombinedState): StateToProps { - const { - annotation: { - annotations: { highlightedConflict, states }, - workspace, - }, - } = state; +function FrameTags(): JSX.Element { + const dispatch = useDispatch(); - return { highlightedConflict, states, workspace }; -} - -function mapDispatchToProps(dispatch: ThunkDispatch): DispatchToProps { - return { - removeObject(objectState: ObjectState): void { - dispatch(removeObjectAction(objectState, false)); - }, - }; -} - -function FrameTags(props: StateToProps & DispatchToProps): JSX.Element { - const { - highlightedConflict, states, workspace, removeObject, - } = props; + const { highlightedConflict, states, workspace } = useSelector((state: CombinedState) => ({ + highlightedConflict: state.annotation.annotations.highlightedConflict, + states: state.annotation.annotations.states, + workspace: state.annotation.workspace, + }), shallowEqual); - const [frameTags, setFrameTags] = useState([] as ObjectState[]); + const [frameTags, setFrameTags] = useState([]); const onRemoveState = (objectState: ObjectState): void => { - removeObject(objectState); + dispatch(removeObjectAction(objectState, false)); }; useEffect(() => { @@ -67,16 +37,20 @@ function FrameTags(props: StateToProps & DispatchToProps): JSX.Element { ); }, [states]); + const tagClassName = useCallback((tag: ObjectState): string => { + const tagHighlighted = (highlightedConflict?.annotationConflicts || []) + .find((conflict: AnnotationConflict) => conflict.serverID === 
tag.serverID); + return tagHighlighted ? 'cvat-frame-tag-highlighted' : 'cvat-frame-tag'; + }, [highlightedConflict]); + return ( <> -
+
{frameTags - .filter((tag: any) => tag.source !== core.enums.Source.GT) - .map((tag: any) => ( + .filter((tag: ObjectState) => !tag.isGroundTruth) + .map((tag: ObjectState) => ( conflict.serverID === tag.serverID).length !== 0 ? 'cvat-frame-tag-highlighted' : 'cvat-frame-tag' - } + className={tagClassName(tag)} color={tag.label.color} onClose={() => { onRemoveState(tag); @@ -88,14 +62,12 @@ function FrameTags(props: StateToProps & DispatchToProps): JSX.Element { ))}
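The memoized tagClassName callback above replaces the inline `.filter(...).length !== 0` checks that were duplicated in both tag lists, and it is recomputed only when the highlighted conflict changes. The same logic as a standalone function, with types cut down to the fields actually involved (assumed shapes, for illustration only):

```ts
interface ConflictLike {
    serverID: number;
}

interface HighlightedConflictLike {
    annotationConflicts: ConflictLike[];
}

interface FrameTagLike {
    serverID: number;
}

// A tag is highlighted when the currently highlighted quality conflict
// references this tag's server ID; otherwise it gets the regular class.
function tagClassName(tag: FrameTagLike, highlighted: HighlightedConflictLike | null): string {
    const isHighlighted = (highlighted?.annotationConflicts ?? [])
        .some((conflict) => conflict.serverID === tag.serverID);
    return isHighlighted ? 'cvat-frame-tag-highlighted' : 'cvat-frame-tag';
}

console.log(tagClassName({ serverID: 42 }, null)); // 'cvat-frame-tag'
console.log(tagClassName(
    { serverID: 42 },
    { annotationConflicts: [{ serverID: 42 }] },
)); // 'cvat-frame-tag-highlighted'
```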
-
+
{frameTags - .filter((tag: any) => tag.source === core.enums.Source.GT) - .map((tag: any) => ( + .filter((tag: ObjectState) => tag.isGroundTruth) + .map((tag: ObjectState) => ( conflict.serverID === tag.serverID).length !== 0 ? 'cvat-frame-tag-highlighted' : 'cvat-frame-tag' - } + className={tagClassName(tag)} color={tag.label.color} onClose={() => { onRemoveState(tag); @@ -112,4 +84,4 @@ function FrameTags(props: StateToProps & DispatchToProps): JSX.Element { ); } -export default connect(mapStateToProps, mapDispatchToProps)(FrameTags); +export default React.memo(FrameTags); diff --git a/cvat-ui/src/components/annotation-page/tag-annotation-workspace/styles.scss b/cvat-ui/src/components/annotation-page/tag-annotation-workspace/styles.scss index 8ac9962e01e8..0740e3c9046d 100644 --- a/cvat-ui/src/components/annotation-page/tag-annotation-workspace/styles.scss +++ b/cvat-ui/src/components/annotation-page/tag-annotation-workspace/styles.scss @@ -96,3 +96,7 @@ transform: scale(1.1); } + +.cvat-canvas-annotation-frame-tags { + margin-bottom: $grid-unit-size; +} diff --git a/cvat-ui/src/components/annotation-page/top-bar/annotation-menu.tsx b/cvat-ui/src/components/annotation-page/top-bar/annotation-menu.tsx index f845b30233df..522f5f978b74 100644 --- a/cvat-ui/src/components/annotation-page/top-bar/annotation-menu.tsx +++ b/cvat-ui/src/components/annotation-page/top-bar/annotation-menu.tsx @@ -6,7 +6,6 @@ import React, { useCallback, useState } from 'react'; import { useSelector, useDispatch } from 'react-redux'; import { useHistory } from 'react-router'; -import { createRoot } from 'react-dom/client'; import Modal from 'antd/lib/modal'; import Text from 'antd/lib/typography/Text'; import InputNumber from 'antd/lib/input-number'; @@ -22,7 +21,7 @@ import { MainMenuIcon } from 'icons'; import { Job, JobState } from 'cvat-core-wrapper'; import CVATTooltip from 'components/common/cvat-tooltip'; -import AnnotationsActionsModalContent from 'components/annotation-page/annotations-actions/annotations-actions-modal'; +import { openAnnotationsActionModal } from 'components/annotation-page/annotations-actions/annotations-actions-modal'; import { CombinedState } from 'reducers'; import { updateCurrentJobAsync, finishCurrentJobAsync, @@ -179,17 +178,7 @@ function AnnotationMenuComponent(): JSX.Element { key: Actions.RUN_ACTIONS, label: 'Run actions', onClick: () => { - const div = window.document.createElement('div'); - window.document.body.append(div); - const root = createRoot(div); - root.render( - { - root.unmount(); - div.remove(); - }} - />, - ); + openAnnotationsActionModal(); }, }); diff --git a/cvat-ui/src/components/common/not-found.tsx b/cvat-ui/src/components/common/not-found.tsx new file mode 100644 index 000000000000..a9b9a15787f7 --- /dev/null +++ b/cvat-ui/src/components/common/not-found.tsx @@ -0,0 +1,42 @@ +// Copyright (C) 2024 CVAT.ai Corporation +// +// SPDX-License-Identifier: MIT + +import React from 'react'; +import Result from 'antd/lib/result'; + +export const JobNotFoundComponent = React.memo((): JSX.Element => ( + +)); + +export const TaskNotFoundComponent = React.memo((): JSX.Element => ( + +)); + +export const ProjectNotFoundComponent = React.memo((): JSX.Element => ( + +)); + +export const CloudStorageNotFoundComponent = React.memo((): JSX.Element => ( + +)); diff --git a/cvat-ui/src/components/common/preview.tsx b/cvat-ui/src/components/common/preview.tsx index b96066b7fa40..ce0a7bbea979 100644 --- a/cvat-ui/src/components/common/preview.tsx +++ 
b/cvat-ui/src/components/common/preview.tsx @@ -11,9 +11,8 @@ import { getJobPreviewAsync } from 'actions/jobs-actions'; import { getTaskPreviewAsync } from 'actions/tasks-actions'; import { getProjectsPreviewAsync } from 'actions/projects-actions'; import { getCloudStoragePreviewAsync } from 'actions/cloud-storage-actions'; -import { - CombinedState, Job, Task, Project, CloudStorage, -} from 'reducers'; +import { CombinedState, CloudStorage } from 'reducers'; +import { Job, Task, Project } from 'cvat-core-wrapper'; import MLModel from 'cvat-core/src/ml-model'; import { getModelPreviewAsync } from 'actions/models-actions'; diff --git a/cvat-ui/src/components/create-job-page/create-job-page.tsx b/cvat-ui/src/components/create-job-page/create-job-page.tsx index 4c46e1b99fb6..7555d0fade3d 100644 --- a/cvat-ui/src/components/create-job-page/create-job-page.tsx +++ b/cvat-ui/src/components/create-job-page/create-job-page.tsx @@ -1,4 +1,4 @@ -// Copyright (C) 2023 CVAT.ai Corporation +// Copyright (C) 2023-2024 CVAT.ai Corporation // // SPDX-License-Identifier: MIT @@ -10,9 +10,9 @@ import { Row, Col } from 'antd/lib/grid'; import Text from 'antd/lib/typography/Text'; import Spin from 'antd/lib/spin'; import notification from 'antd/lib/notification'; -import { Task } from 'reducers'; +import { TaskNotFoundComponent } from 'components/common/not-found'; import { useIsMounted } from 'utils/hooks'; -import { getCore } from 'cvat-core-wrapper'; +import { getCore, Task } from 'cvat-core-wrapper'; import JobForm from './job-form'; const core = getCore(); @@ -50,6 +50,15 @@ function CreateJobPage(): JSX.Element { setFetchingTask(false); } }, []); + + if (fetchingTask) { + return ; + } + + if (!taskInstance) { + return ; + } + return (
@@ -57,19 +66,11 @@ function CreateJobPage(): JSX.Element { Add a new job - { - fetchingTask ? ( -
- -
- ) : ( - - - - - - ) - } + + + + +
); } diff --git a/cvat-ui/src/components/create-job-page/job-form.tsx b/cvat-ui/src/components/create-job-page/job-form.tsx index b94d8a5f3fac..a6d4a63c7103 100644 --- a/cvat-ui/src/components/create-job-page/job-form.tsx +++ b/cvat-ui/src/components/create-job-page/job-form.tsx @@ -4,7 +4,7 @@ import './styles.scss'; -import React, { useCallback, useState } from 'react'; +import React, { useCallback, useEffect, useState } from 'react'; import { useHistory } from 'react-router'; import { useDispatch } from 'react-redux'; import { Row, Col } from 'antd/lib/grid'; @@ -26,19 +26,20 @@ export enum FrameSelectionMethod { } interface JobDataMutual { - task_id: number; - frame_selection_method: FrameSelectionMethod; + taskID: number; + frameSelectionMethod: FrameSelectionMethod; type: JobType; seed?: number; } export interface JobData extends JobDataMutual { - frame_count: number; + frameCount?: number; + framesPerJobCount?: number; } export interface JobFormData extends JobDataMutual { quantity: number; - frame_count: number; + frameCount: number; } interface Props { @@ -49,21 +50,24 @@ const defaultQuantity = 5; function JobForm(props: Props): JSX.Element { const { task } = props; - const { size: taskSize } = task; + const { size: taskSize, segmentSize } = task; const [form] = Form.useForm(); const dispatch = useDispatch(); const history = useHistory(); const [fetching, setFetching] = useState(false); + const [frameSelectionMethod, setFrameSelectionMethod] = useState(FrameSelectionMethod.RANDOM); const submit = useCallback(async (): Promise => { try { const values: JobFormData = await form.validateFields(); const data: JobData = { - frame_selection_method: values.frame_selection_method, + taskID: task.id, + frameSelectionMethod: values.frameSelectionMethod, type: values.type, seed: values.seed, - frame_count: values.frame_count, - task_id: task.id, + ...(values.frameSelectionMethod === FrameSelectionMethod.RANDOM ? 
+ { frameCount: values.frameCount } : { framesPerJobCount: values.frameCount } + ), }; const createdJob = await dispatch(createJobAsync(data)); @@ -86,24 +90,40 @@ function JobForm(props: Props): JSX.Element { } }; + const sizeBase = (): number => { + if (frameSelectionMethod === FrameSelectionMethod.RANDOM) { + return taskSize; + } + return segmentSize; + }; + + const quantityFromFrameCount = (value: number): number => Math.floor((value / sizeBase()) * 100); + const frameCountFromQuantity = (value: number): number => Math.round((value * sizeBase()) / 100); + const onQuantityChange = useCallback((value: number | null) => { if (value) { - const newFrameCount = Math.round((value * taskSize) / 100); + const newFrameCount = frameCountFromQuantity(value); form.setFieldsValue({ - frame_count: newFrameCount, + frameCount: newFrameCount, }); } - }, [taskSize]); + }, [taskSize, frameSelectionMethod, segmentSize]); const onFrameCountChange = useCallback((value: number | null) => { if (value) { - const newQuantity = Math.floor((value / taskSize) * 100); + const newQuantity = quantityFromFrameCount(value); form.setFieldsValue({ quantity: newQuantity, }); } - }, [taskSize]); - const frameCountDescription = 'A representative set, 5-15% of randomly chosen frames is recommended'; + }, [taskSize, frameSelectionMethod, segmentSize]); + + useEffect(() => { + const currentQuantity = form.getFieldValue('quantity'); + onQuantityChange(currentQuantity); + }, [form, frameSelectionMethod]); + + const description = 'A representative set, 5-15% of randomly chosen frames is recommended'; return ( @@ -113,9 +133,9 @@ function JobForm(props: Props): JSX.Element { layout='vertical' initialValues={{ type: JobType.GROUND_TRUTH, - frame_selection_method: FrameSelectionMethod.RANDOM, + frameSelectionMethod: FrameSelectionMethod.RANDOM, quantity: defaultQuantity, - frame_count: Math.floor((defaultQuantity * taskSize) / 100), + frameCount: frameCountFromQuantity(defaultQuantity), }} > @@ -134,13 +154,14 @@ function JobForm(props: Props): JSX.Element { + + Image size + + + Group bbox size + + + + + + Line Comparison diff --git a/cvat-ui/src/components/quality-control/task-quality/summary.tsx b/cvat-ui/src/components/quality-control/task-quality/summary.tsx index 59767192ed0d..30fc859feb54 100644 --- a/cvat-ui/src/components/quality-control/task-quality/summary.tsx +++ b/cvat-ui/src/components/quality-control/task-quality/summary.tsx @@ -5,40 +5,51 @@ import React from 'react'; import { Row, Col } from 'antd/es/grid'; import Text from 'antd/lib/typography/Text'; + import AnalyticsCard from 'components/analytics-page/views/analytics-card'; export interface Props { + mode: 'gt' | 'gt_pool' excludedCount: number; totalCount: number; activeCount: number; } export default function SummaryComponent(props: Readonly): JSX.Element { - const { excludedCount, totalCount, activeCount } = props; + const { + excludedCount, totalCount, activeCount, mode, + } = props; const reportInfo = ( - + - Excluded count: + Validation mode: {' '} - {excludedCount} + {mode === 'gt' ? 
'Ground Truth' : 'Honeypots'} - Total count: + Total validation frames: {' '} {totalCount} + + + Excluded validation frames: + {' '} + {excludedCount} + + - Active count: + Active validation frames: {' '} {activeCount} diff --git a/cvat-ui/src/components/requests-page/request-card.tsx b/cvat-ui/src/components/requests-page/request-card.tsx index 980af114563a..dd2a8886941a 100644 --- a/cvat-ui/src/components/requests-page/request-card.tsx +++ b/cvat-ui/src/components/requests-page/request-card.tsx @@ -100,11 +100,15 @@ function constructTimestamps(request: Request): JSX.Element { ); } case RQStatus.FAILED: { - return ( + return (request.startedDate ? ( {`Started by ${request.owner.username} on ${started}`} - ); + ) : ( + + {`Enqueued by ${request.owner.username} on ${created}`} + + )); } case RQStatus.STARTED: { return ( @@ -145,7 +149,7 @@ function RequestCard(props: Props): JSX.Element { const dispatch = useDispatch(); const linkToEntity = constructLink(request); - const percent = request.status === RQStatus.FINISHED ? 100 : request.progress; + const percent = request.status === RQStatus.FINISHED ? 100 : (request.progress ?? 0) * 100; const timestamps = constructTimestamps(request); const name = constructName(operation); diff --git a/cvat-ui/src/components/task-page/task-page.tsx b/cvat-ui/src/components/task-page/task-page.tsx index 1d92a5e4eb59..a371a113c3d6 100644 --- a/cvat-ui/src/components/task-page/task-page.tsx +++ b/cvat-ui/src/components/task-page/task-page.tsx @@ -9,12 +9,12 @@ import { useHistory, useParams } from 'react-router'; import { useDispatch, useSelector } from 'react-redux'; import { Row, Col } from 'antd/lib/grid'; import Spin from 'antd/lib/spin'; -import Result from 'antd/lib/result'; import notification from 'antd/lib/notification'; import { getInferenceStatusAsync } from 'actions/models-actions'; import { updateJobAsync } from 'actions/jobs-actions'; import { getCore, Task, Job } from 'cvat-core-wrapper'; +import { TaskNotFoundComponent } from 'components/common/not-found'; import JobListComponent from 'components/task-page/job-list'; import ModelRunnerModal from 'components/model-runner-modal/model-runner-dialog'; import CVATLoadingSpinner from 'components/common/loading-spinner'; @@ -78,14 +78,7 @@ function TaskPageComponent(): JSX.Element { } if (!taskInstance) { - return ( - - ); + return ; } const onUpdateTask = (task: Task): Promise => ( diff --git a/cvat-ui/src/components/tasks-page/automatic-annotation-progress.tsx b/cvat-ui/src/components/tasks-page/automatic-annotation-progress.tsx index 8dbb152ef9e0..8b2ecd507680 100644 --- a/cvat-ui/src/components/tasks-page/automatic-annotation-progress.tsx +++ b/cvat-ui/src/components/tasks-page/automatic-annotation-progress.tsx @@ -63,7 +63,7 @@ export default function AutomaticAnnotationProgress(props: Props): JSX.Element | return (<>Unknown status received); } - return <>Automatic annotation accomplisted; + return <>Automatic annotation accomplished; })()}
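One detail in the request-card change above: the core Request object reports progress as a 0-1 fraction and may omit it entirely, while antd's Progress component expects 0-100, hence the `(request.progress ?? 0) * 100` with a hard 100 for finished requests. A small sketch of the conversion; RequestLike and the enum values here are cut-down stand-ins for the real cvat-core types:

```ts
// Assumed minimal shape of the core request status/progress fields.
enum RQStatus {
    QUEUED = 'queued',
    STARTED = 'started',
    FINISHED = 'finished',
    FAILED = 'failed',
}

interface RequestLike {
    status: RQStatus;
    progress?: number; // 0..1 fraction, possibly undefined
}

// Finished requests always render a full bar; otherwise scale the fraction
// to the 0..100 range antd expects and treat a missing value as no progress.
function toPercent(request: RequestLike): number {
    return request.status === RQStatus.FINISHED ? 100 : (request.progress ?? 0) * 100;
}

console.log(toPercent({ status: RQStatus.STARTED, progress: 0.5 })); // 50
console.log(toPercent({ status: RQStatus.FINISHED })); // 100
console.log(toPercent({ status: RQStatus.QUEUED })); // 0
```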
diff --git a/cvat-ui/src/components/update-cloud-storage-page/update-cloud-storage-page.tsx b/cvat-ui/src/components/update-cloud-storage-page/update-cloud-storage-page.tsx index 52afa4c97377..ef1631545b48 100644 --- a/cvat-ui/src/components/update-cloud-storage-page/update-cloud-storage-page.tsx +++ b/cvat-ui/src/components/update-cloud-storage-page/update-cloud-storage-page.tsx @@ -8,12 +8,12 @@ import { shallowEqual, useDispatch, useSelector } from 'react-redux'; import { useParams } from 'react-router-dom'; import { Row, Col } from 'antd/lib/grid'; import Spin from 'antd/lib/spin'; -import Result from 'antd/lib/result'; import Text from 'antd/lib/typography/Text'; import { CombinedState } from 'reducers'; import { getCloudStoragesAsync } from 'actions/cloud-storage-actions'; import CreateCloudStorageForm from 'components/create-cloud-storage-page/cloud-storage-form'; +import { CloudStorageNotFoundComponent } from 'components/common/not-found'; interface ParamType { id: string; @@ -45,14 +45,7 @@ export default function UpdateCloudStoragePageComponent(): JSX.Element { } if (!cloudStorage) { - return ( - - ); + return ; } return ( diff --git a/cvat-ui/src/config.tsx b/cvat-ui/src/config.tsx index 7e0b404b093a..05732d9e83ce 100644 --- a/cvat-ui/src/config.tsx +++ b/cvat-ui/src/config.tsx @@ -108,6 +108,7 @@ const DEFAULT_GOOGLE_CLOUD_STORAGE_LOCATIONS: string[][] = [ ['NAM4', 'US-CENTRAL1 and US-EAST1'], ]; +const MAXIMUM_NOTIFICATION_MESSAGE_LENGTH = 600; // all above will be sent to console const HEALTH_CHECK_RETRIES = 10; const HEALTH_CHECK_PERIOD = 3000; // ms const HEALTH_CHECK_REQUEST_TIMEOUT = 15000; // ms @@ -190,4 +191,5 @@ export default { REQUEST_SUCCESS_NOTIFICATION_DURATION, BLACKLISTED_GO_BACK_PATHS, PAID_PLACEHOLDER_CONFIG, + MAXIMUM_NOTIFICATION_MESSAGE_LENGTH, }; diff --git a/cvat-ui/src/containers/actions-menu/actions-menu.tsx b/cvat-ui/src/containers/actions-menu/actions-menu.tsx index e9773c2b9051..3c200feb58a0 100644 --- a/cvat-ui/src/containers/actions-menu/actions-menu.tsx +++ b/cvat-ui/src/containers/actions-menu/actions-menu.tsx @@ -17,6 +17,7 @@ import { } from 'actions/tasks-actions'; import { exportActions } from 'actions/export-actions'; import { importActions } from 'actions/import-actions'; +import { RQStatus } from 'cvat-core-wrapper'; interface OwnProps { taskInstance: any; @@ -46,9 +47,12 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps { formats: { annotationFormats }, } = state; + const inference = state.models.inferences[tid]; + return { annotationFormats, - inferenceIsActive: tid in state.models.inferences, + inferenceIsActive: inference && + ![RQStatus.FAILED, RQStatus.FINISHED].includes(inference.status), }; } diff --git a/cvat-ui/src/containers/annotation-page/standard-workspace/controls-side-bar/draw-shape-popover.tsx b/cvat-ui/src/containers/annotation-page/standard-workspace/controls-side-bar/draw-shape-popover.tsx index 45325a7ec6ac..50e7ac511832 100644 --- a/cvat-ui/src/containers/annotation-page/standard-workspace/controls-side-bar/draw-shape-popover.tsx +++ b/cvat-ui/src/containers/annotation-page/standard-workspace/controls-side-bar/draw-shape-popover.tsx @@ -204,7 +204,7 @@ class DrawShapePopoverContainer extends React.PureComponent { numberOfPoints={numberOfPoints} rectDrawingMethod={rectDrawingMethod} cuboidDrawingMethod={cuboidDrawingMethod} - repeatShapeShortcut={normalizedKeyMap.SWITCH_DRAW_MODE} + repeatShapeShortcut={normalizedKeyMap.SWITCH_DRAW_MODE_STANDARD_CONTROLS} onChangeLabel={this.onChangeLabel} 
onChangePoints={this.onChangePoints} onChangeRectDrawingMethod={this.onChangeRectDrawingMethod} diff --git a/cvat-ui/src/containers/annotation-page/standard-workspace/controls-side-bar/setup-tag-popover.tsx b/cvat-ui/src/containers/annotation-page/standard-workspace/controls-side-bar/setup-tag-popover.tsx index 392dfa11785a..1c6236956197 100644 --- a/cvat-ui/src/containers/annotation-page/standard-workspace/controls-side-bar/setup-tag-popover.tsx +++ b/cvat-ui/src/containers/annotation-page/standard-workspace/controls-side-bar/setup-tag-popover.tsx @@ -154,7 +154,7 @@ class DrawShapePopoverContainer extends React.PureComponent { diff --git a/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/object-buttons.tsx b/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/object-buttons.tsx index 61f6c1064c61..9e2748047294 100644 --- a/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/object-buttons.tsx +++ b/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/object-buttons.tsx @@ -9,7 +9,7 @@ import { connect } from 'react-redux'; import { ObjectState, Job } from 'cvat-core-wrapper'; import isAbleToChangeFrame from 'utils/is-able-to-change-frame'; import { ThunkDispatch } from 'utils/redux'; -import { updateAnnotationsAsync, changeFrameAsync } from 'actions/annotation-actions'; +import { updateAnnotationsAsync, changeFrameAsync, changeHideActiveObjectAsync } from 'actions/annotation-actions'; import { CombinedState } from 'reducers'; import ItemButtonsComponent from 'components/annotation-page/standard-workspace/objects-side-bar/object-item-buttons'; @@ -29,11 +29,13 @@ interface StateToProps { outsideDisabled: boolean; hiddenDisabled: boolean; keyframeDisabled: boolean; + editedState: ObjectState | null, } interface DispatchToProps { updateAnnotations(statesToUpdate: any[]): void; changeFrame(frame: number): void; + changeHideEditedState(value: boolean): void; } function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps { @@ -44,6 +46,7 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps { player: { frame: { number: frameNumber }, }, + editing: { objectState: editedState }, }, shortcuts: { normalizedKeyMap }, } = state; @@ -61,6 +64,7 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps { objectState, normalizedKeyMap, frameNumber, + editedState, jobInstance: jobInstance as Job, outsideDisabled: typeof outsideDisabled === 'undefined' ? false : outsideDisabled, hiddenDisabled: typeof hiddenDisabled === 'undefined' ? 
false : hiddenDisabled, @@ -76,6 +80,9 @@ function mapDispatchToProps(dispatch: ThunkDispatch): DispatchToProps { changeFrame(frame: number): void { dispatch(changeFrameAsync(frame)); }, + changeHideEditedState(value: boolean): void { + dispatch(changeHideActiveObjectAsync(value)); + }, }; } @@ -145,15 +152,23 @@ class ItemButtonsWrapper extends React.PureComponent { - const { objectState } = this.props; - objectState.hidden = false; - this.commit(); + const { objectState, editedState, changeHideEditedState } = this.props; + if (objectState.clientID === editedState?.clientID) { + changeHideEditedState(false); + } else { + objectState.hidden = false; + this.commit(); + } }; private hide = (): void => { - const { objectState } = this.props; - objectState.hidden = true; - this.commit(); + const { objectState, editedState, changeHideEditedState } = this.props; + if (objectState.clientID === editedState?.clientID) { + changeHideEditedState(true); + } else { + objectState.hidden = true; + this.commit(); + } }; private setOccluded = (): void => { diff --git a/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/object-item.tsx b/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/object-item.tsx index 9cbb75bd75f2..362455a29fbf 100644 --- a/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/object-item.tsx +++ b/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/object-item.tsx @@ -20,6 +20,7 @@ import { import { ActiveControl, CombinedState, ColorBy, ShapeType, } from 'reducers'; +import { openAnnotationsActionModal } from 'components/annotation-page/annotations-actions/annotations-actions-modal'; import ObjectStateItemComponent from 'components/annotation-page/standard-workspace/objects-side-bar/object-item'; import { getColor } from 'components/annotation-page/standard-workspace/objects-side-bar/shared'; import openCVWrapper from 'utils/opencv-wrapper/opencv-wrapper'; @@ -376,6 +377,11 @@ class ObjectItemContainer extends React.PureComponent { } }; + private runAnnotationAction = (): void => { + const { objectState } = this.props; + openAnnotationsActionModal(objectState); + }; + private commit(): void { const { objectState, readonly, updateState } = this.props; if (!readonly) { @@ -426,6 +432,7 @@ class ObjectItemContainer extends React.PureComponent { edit={this.edit} slice={this.slice} resetCuboidPerspective={this.resetCuboidPerspective} + runAnnotationAction={this.runAnnotationAction} /> ); } diff --git a/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/objects-list.tsx b/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/objects-list.tsx index 7f120cc981d6..5df7b556ff34 100644 --- a/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/objects-list.tsx +++ b/cvat-ui/src/containers/annotation-page/standard-workspace/objects-side-bar/objects-list.tsx @@ -19,6 +19,7 @@ import { switchPropagateVisibility as switchPropagateVisibilityAction, removeObject as removeObjectAction, fetchAnnotationsAsync, + changeHideActiveObjectAsync, } from 'actions/annotation-actions'; import { changeShowGroundTruth as changeShowGroundTruthAction, @@ -26,12 +27,14 @@ import { import isAbleToChangeFrame from 'utils/is-able-to-change-frame'; import { CombinedState, StatesOrdering, ObjectType, ColorBy, Workspace, + ActiveControl, } from 'reducers'; import { ObjectState, ShapeType } from 'cvat-core-wrapper'; import { filterAnnotations } from 
'utils/filter-annotations'; import { registerComponentShortcuts } from 'actions/shortcuts-actions'; import { ShortcutScope } from 'utils/enums'; import { subKeyMap } from 'utils/component-subkeymap'; +import { openAnnotationsActionModal } from 'components/annotation-page/annotations-actions/annotations-actions-modal'; interface OwnProps { readonly: boolean; @@ -56,6 +59,9 @@ interface StateToProps { normalizedKeyMap: Record; showGroundTruth: boolean; workspace: Workspace; + editedState: ObjectState | null, + activeControl: ActiveControl, + activeObjectHidden: boolean, } interface DispatchToProps { @@ -67,6 +73,7 @@ interface DispatchToProps { changeFrame(frame: number): void; changeGroupColor(group: number, color: string): void; changeShowGroundTruth(value: boolean): void; + changeHideEditedState(value: boolean): void; } const componentShortcuts = { @@ -142,6 +149,12 @@ const componentShortcuts = { sequences: ['ctrl+c'], scope: ShortcutScope.OBJECTS_SIDEBAR, }, + RUN_ANNOTATIONS_ACTION: { + name: 'Run annotations action', + description: 'Opens a dialog with annotations actions', + sequences: ['ctrl+e'], + scope: ShortcutScope.OBJECTS_SIDEBAR, + }, PROPAGATE_OBJECT: { name: 'Propagate object', description: 'Make a copy of the object on the following frames', @@ -186,6 +199,10 @@ function mapStateToProps(state: CombinedState): StateToProps { player: { frame: { number: frameNumber }, }, + canvas: { + activeControl, activeObjectHidden, + }, + editing: { objectState: editedState }, colors, workspace, }, @@ -233,6 +250,9 @@ function mapStateToProps(state: CombinedState): StateToProps { normalizedKeyMap, showGroundTruth, workspace, + editedState, + activeControl, + activeObjectHidden, }; } @@ -263,6 +283,9 @@ function mapDispatchToProps(dispatch: any): DispatchToProps { dispatch(changeShowGroundTruthAction(value)); dispatch(fetchAnnotationsAsync()); }, + changeHideEditedState(value: boolean): void { + dispatch(changeHideActiveObjectAsync(value)); + }, }; } @@ -388,9 +411,13 @@ class ObjectsListContainer extends React.PureComponent { } private hideAllStates(hidden: boolean): void { - const { updateAnnotations } = this.props; + const { updateAnnotations, editedState, changeHideEditedState } = this.props; const { filteredStates } = this.state; + if (editedState?.shapeType === ShapeType.MASK) { + changeHideEditedState(hidden); + } + for (const objectState of filteredStates) { objectState.hidden = hidden; } @@ -475,6 +502,13 @@ class ObjectsListContainer extends React.PureComponent { SWITCH_HIDDEN: (event: KeyboardEvent | undefined) => { preventDefault(event); const state = activatedState(); + const { + editedState, changeHideEditedState, activeControl, activeObjectHidden, + } = this.props; + if (editedState?.shapeType === ShapeType.MASK || activeControl === ActiveControl.DRAW_MASK) { + const hide = editedState ? 
!editedState.hidden : !activeObjectHidden; + changeHideEditedState(hide); + } if (state) { state.hidden = !state.hidden; updateAnnotations([state]); @@ -561,6 +595,16 @@ class ObjectsListContainer extends React.PureComponent { copyShape(state); } }, + RUN_ANNOTATIONS_ACTION: () => { + const state = activatedState(true); + if (!readonly) { + if (state) { + openAnnotationsActionModal(state); + } else { + openAnnotationsActionModal(); + } + } + }, PROPAGATE_OBJECT: (event: KeyboardEvent | undefined) => { preventDefault(event); const state = activatedState(); diff --git a/cvat-ui/src/containers/annotation-page/top-bar/top-bar.tsx b/cvat-ui/src/containers/annotation-page/top-bar/top-bar.tsx index 4d81fa0822c2..7185106e05d8 100644 --- a/cvat-ui/src/containers/annotation-page/top-bar/top-bar.tsx +++ b/cvat-ui/src/containers/annotation-page/top-bar/top-bar.tsx @@ -28,7 +28,7 @@ import { import AnnotationTopBarComponent from 'components/annotation-page/top-bar/top-bar'; import { Canvas } from 'cvat-canvas-wrapper'; import { Canvas3d } from 'cvat-canvas3d-wrapper'; -import { DimensionType, Job, JobType } from 'cvat-core-wrapper'; +import { Job } from 'cvat-core-wrapper'; import { CombinedState, FrameSpeed, @@ -225,10 +225,12 @@ type Props = StateToProps & DispatchToProps & RouteComponentProps; class AnnotationTopBarContainer extends React.PureComponent { private inputFrameRef: React.RefObject; private autoSaveInterval: number | undefined; + private isWaitingForPlayDelay: boolean; private unblock: any; constructor(props: Props) { super(props); + this.isWaitingForPlayDelay = false; this.inputFrameRef = React.createRef(); } @@ -270,7 +272,7 @@ class AnnotationTopBarContainer extends React.PureComponent { if (this.autoSaveInterval) window.clearInterval(this.autoSaveInterval); this.autoSaveInterval = window.setInterval(this.autoSave.bind(this), autoSaveInterval); } - this.play(); + this.handlePlayIfNecessary(); } public componentWillUnmount(): void { @@ -279,6 +281,50 @@ class AnnotationTopBarContainer extends React.PureComponent { this.unblock(); } + private async handlePlayIfNecessary(): Promise { + const { + jobInstance, + frameNumber, + frameDelay, + frameFetching, + playing, + canvasIsReady, + onSwitchPlay, + onChangeFrame, + } = this.props; + + const { stopFrame } = jobInstance; + + if (playing && canvasIsReady && !frameFetching && !this.isWaitingForPlayDelay) { + this.isWaitingForPlayDelay = true; + try { + await new Promise((resolve) => { + setTimeout(resolve, frameDelay); + }); + + const { playing: currentPlaying, showDeletedFrames } = this.props; + + if (currentPlaying) { + const nextCandidate = frameNumber + 1; + if (nextCandidate > stopFrame) { + onSwitchPlay(false); + return; + } + + const next = await jobInstance.frames + .search({ notDeleted: !showDeletedFrames }, nextCandidate, stopFrame); + if (next !== null && isAbleToChangeFrame(next)) { + onChangeFrame(next, currentPlaying); + } else { + onSwitchPlay(false); + } + } + } finally { + this.isWaitingForPlayDelay = false; + } + } + } + private undo = (): void => { const { undo, undoAction } = this.props; @@ -569,60 +615,6 @@ class AnnotationTopBarContainer extends React.PureComponent { return undefined; }; - private play(): void { - const { - jobInstance, - frameSpeed, - frameNumber, - frameDelay, - frameFetching, - playing, - canvasIsReady, - onSwitchPlay, - onChangeFrame, - } = this.props; - - if (playing && canvasIsReady && !frameFetching) { - if (frameNumber < jobInstance.stopFrame) { - let framesSkipped = 0; - if (frameSpeed === 
FrameSpeed.Fast && frameNumber + 1 < jobInstance.stopFrame) { - framesSkipped = 1; - } - if (frameSpeed === FrameSpeed.Fastest && frameNumber + 2 < jobInstance.stopFrame) { - framesSkipped = 2; - } - - setTimeout(async () => { - const { playing: stillPlaying } = this.props; - if (stillPlaying) { - if (isAbleToChangeFrame()) { - if (jobInstance.type === JobType.GROUND_TRUTH) { - const newFrame = await jobInstance.frames.search( - { notDeleted: true }, - frameNumber + 1, - jobInstance.stopFrame, - ); - if (newFrame !== null) { - onChangeFrame(newFrame, stillPlaying); - } else { - onSwitchPlay(false); - } - } else { - onChangeFrame(frameNumber + 1 + framesSkipped, stillPlaying, framesSkipped + 1); - } - } else if (jobInstance.dimension === DimensionType.DIMENSION_2D) { - onSwitchPlay(false); - } else { - setTimeout(() => this.play(), frameDelay); - } - } - }, frameDelay); - } else { - onSwitchPlay(false); - } - } - } - private autoSave(): void { const { autoSave, saving, onSaveAnnotation } = this.props; @@ -697,7 +689,7 @@ class AnnotationTopBarContainer extends React.PureComponent { redoAction={redoAction} undoShortcut={normalizedKeyMap.UNDO} redoShortcut={normalizedKeyMap.REDO} - drawShortcut={normalizedKeyMap.SWITCH_DRAW_MODE} + drawShortcut={normalizedKeyMap.SWITCH_DRAW_MODE_STANDARD_CONTROLS} switchToolsBlockerShortcut={normalizedKeyMap.SWITCH_TOOLS_BLOCKER_STATE} playPauseShortcut={normalizedKeyMap.PLAY_PAUSE} deleteFrameShortcut={normalizedKeyMap.DELETE_FRAME} diff --git a/cvat-ui/src/containers/tasks-page/task-item.tsx b/cvat-ui/src/containers/tasks-page/task-item.tsx index 2131fd643417..b8950cd60f5b 100644 --- a/cvat-ui/src/containers/tasks-page/task-item.tsx +++ b/cvat-ui/src/containers/tasks-page/task-item.tsx @@ -6,11 +6,9 @@ import { connect } from 'react-redux'; import { Task, Request } from 'cvat-core-wrapper'; -import { - TasksQuery, CombinedState, ActiveInference, PluginComponent, -} from 'reducers'; +import { CombinedState, ActiveInference, PluginComponent } from 'reducers'; import TaskItemComponent from 'components/tasks-page/task-item'; -import { getTasksAsync, updateTaskInState as updateTaskInStateAction, getTaskPreviewAsync } from 'actions/tasks-actions'; +import { updateTaskInState as updateTaskInStateAction, getTaskPreviewAsync } from 'actions/tasks-actions'; import { cancelInferenceAsync } from 'actions/models-actions'; interface StateToProps { @@ -22,7 +20,6 @@ interface StateToProps { } interface DispatchToProps { - getTasks(query: TasksQuery): void; updateTaskInState(task: Task): void; cancelAutoAnnotation(): void; } @@ -53,9 +50,6 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps { function mapDispatchToProps(dispatch: any, own: OwnProps): DispatchToProps { return { - getTasks(query: TasksQuery): void { - dispatch(getTasksAsync(query)); - }, cancelAutoAnnotation(): void { dispatch(cancelInferenceAsync(own.taskID)); }, diff --git a/cvat-ui/src/containers/tasks-page/tasks-list.tsx b/cvat-ui/src/containers/tasks-page/tasks-list.tsx index b32cfaf7186a..630e81647691 100644 --- a/cvat-ui/src/containers/tasks-page/tasks-list.tsx +++ b/cvat-ui/src/containers/tasks-page/tasks-list.tsx @@ -1,39 +1,24 @@ // Copyright (C) 2020-2022 Intel Corporation -// Copyright (C) 2022 CVAT.ai Corporation +// Copyright (C) 2022-2024 CVAT.ai Corporation // // SPDX-License-Identifier: MIT import React from 'react'; import { connect } from 'react-redux'; -import { TasksState, TasksQuery, CombinedState } from 'reducers'; +import { TasksState, CombinedState } from 
'reducers'; import TasksListComponent from 'components/tasks-page/task-list'; -import { getTasksAsync } from 'actions/tasks-actions'; interface StateToProps { tasks: TasksState; } -interface DispatchToProps { - getTasks: (query: TasksQuery) => void; -} - function mapStateToProps(state: CombinedState): StateToProps { return { tasks: state.tasks, }; } -function mapDispatchToProps(dispatch: any): DispatchToProps { - return { - getTasks: (query: TasksQuery): void => { - dispatch(getTasksAsync(query)); - }, - }; -} - -type TasksListContainerProps = StateToProps & DispatchToProps; - -function TasksListContainer(props: TasksListContainerProps): JSX.Element { +function TasksListContainer(props: StateToProps): JSX.Element { const { tasks } = props; return ( @@ -43,4 +28,4 @@ function TasksListContainer(props: TasksListContainerProps): JSX.Element { ); } -export default connect(mapStateToProps, mapDispatchToProps)(TasksListContainer); +export default connect(mapStateToProps)(TasksListContainer); diff --git a/cvat-ui/src/cvat-core-wrapper.ts b/cvat-ui/src/cvat-core-wrapper.ts index dac86011953a..fc255dd53324 100644 --- a/cvat-ui/src/cvat-core-wrapper.ts +++ b/cvat-ui/src/cvat-core-wrapper.ts @@ -10,13 +10,13 @@ import ObjectState from 'cvat-core/src/object-state'; import Webhook from 'cvat-core/src/webhook'; import MLModel from 'cvat-core/src/ml-model'; import CloudStorage from 'cvat-core/src/cloud-storage'; -import { ModelProvider } from 'cvat-core/src/lambda-manager'; import { Label, Attribute, } from 'cvat-core/src/labels'; import { SerializedAttribute, SerializedLabel, SerializedAPISchema, } from 'cvat-core/src/server-response-types'; +import { UpdateStatusData } from 'cvat-core/src/core-types'; import { Job, Task } from 'cvat-core/src/session'; import Project from 'cvat-core/src/project'; import QualityReport, { QualitySummary } from 'cvat-core/src/quality-report'; @@ -25,9 +25,9 @@ import QualitySettings, { TargetMetric } from 'cvat-core/src/quality-settings'; import { FramesMetaData, FrameData } from 'cvat-core/src/frames'; import { ServerError, RequestError } from 'cvat-core/src/exceptions'; import { - ShapeType, LabelType, ModelKind, ModelProviders, - ModelReturnType, DimensionType, JobType, - JobStage, JobState, RQStatus, + ShapeType, ObjectType, LabelType, ModelKind, ModelProviders, + ModelReturnType, DimensionType, JobType, Source, + JobStage, JobState, RQStatus, StorageLocation, } from 'cvat-core/src/enums'; import { Storage, StorageData } from 'cvat-core/src/storage'; import Issue from 'cvat-core/src/issue'; @@ -35,13 +35,15 @@ import Comment from 'cvat-core/src/comment'; import User from 'cvat-core/src/user'; import Organization, { Membership, Invitation } from 'cvat-core/src/organization'; import AnnotationGuide from 'cvat-core/src/guide'; -import ValidationLayout from 'cvat-core/src/validation-layout'; +import { JobValidationLayout, TaskValidationLayout } from 'cvat-core/src/validation-layout'; import AnalyticsReport, { AnalyticsEntryViewType, AnalyticsEntry } from 'cvat-core/src/analytics-report'; import { Dumper } from 'cvat-core/src/annotation-formats'; import { Event } from 'cvat-core/src/event'; import { APIWrapperEnterOptions } from 'cvat-core/src/plugins'; -import BaseSingleFrameAction, { ActionParameterType, FrameSelectionType } from 'cvat-core/src/annotations-actions'; -import { Request } from 'cvat-core/src/request'; +import { BaseShapesAction } from 'cvat-core/src/annotations-actions/base-shapes-action'; +import { BaseCollectionAction } from 
'cvat-core/src/annotations-actions/base-collection-action'; +import { ActionParameterType, BaseAction } from 'cvat-core/src/annotations-actions/base-action'; +import { Request, RequestOperation } from 'cvat-core/src/request'; const cvat: CVATCore = _cvat; @@ -68,6 +70,8 @@ export { AnnotationGuide, Attribute, ShapeType, + Source, + ObjectType, LabelType, Storage, Webhook, @@ -88,7 +92,9 @@ export { JobStage, JobState, RQStatus, - BaseSingleFrameAction, + BaseAction, + BaseShapesAction, + BaseCollectionAction, QualityReport, QualityConflict, QualitySettings, @@ -104,19 +110,21 @@ export { Event, FrameData, ActionParameterType, - FrameSelectionType, Request, - ValidationLayout, + JobValidationLayout, + TaskValidationLayout, + StorageLocation, }; export type { SerializedAttribute, SerializedLabel, StorageData, - ModelProvider, APIWrapperEnterOptions, QualitySummary, CVATCore, SerializedAPISchema, ProjectOrTaskOrJob, + RequestOperation, + UpdateStatusData, }; diff --git a/cvat-ui/src/index.html b/cvat-ui/src/index.html index 65e4812fe512..f23b42af30d9 100644 --- a/cvat-ui/src/index.html +++ b/cvat-ui/src/index.html @@ -16,7 +16,6 @@ name="description" content="Computer Vision Annotation Tool (CVAT) is a free, open source, web-based image and video annotation tool which is used for labeling data for computer vision algorithms. CVAT supports the primary tasks of supervised machine learning: object detection, image classification, and image segmentation. CVAT allows users to annotate data for each of these cases" /> - diff --git a/cvat-ui/src/reducers/annotation-reducer.ts b/cvat-ui/src/reducers/annotation-reducer.ts index 847a0b84d93d..311f54c0fe96 100644 --- a/cvat-ui/src/reducers/annotation-reducer.ts +++ b/cvat-ui/src/reducers/annotation-reducer.ts @@ -10,7 +10,9 @@ import { AuthActionTypes } from 'actions/auth-actions'; import { BoundariesActionTypes } from 'actions/boundaries-actions'; import { Canvas, CanvasMode } from 'cvat-canvas-wrapper'; import { Canvas3d } from 'cvat-canvas3d-wrapper'; -import { DimensionType, JobStage, LabelType } from 'cvat-core-wrapper'; +import { + DimensionType, JobStage, Label, LabelType, +} from 'cvat-core-wrapper'; import { clamp } from 'utils/math'; import { @@ -29,6 +31,16 @@ function updateActivatedStateID(newStates: any[], prevActivatedStateID: number | null; } +export function labelShapeType(label?: Label): ShapeType | null { + if (label && Object.values(ShapeType).includes(label.type as any)) { + return label.type as unknown as ShapeType; + } + if (label?.type === LabelType.TAG) { + return null; + } + return ShapeType.RECTANGLE; +} + const defaultState: AnnotationState = { activities: { loads: {}, @@ -51,6 +63,7 @@ const defaultState: AnnotationState = { instance: null, ready: false, activeControl: ActiveControl.CURSOR, + activeObjectHidden: false, }, job: { openTime: null, @@ -66,6 +79,7 @@ const defaultState: AnnotationState = { groundTruthJobFramesMeta: null, groundTruthInstance: null, }, + frameNumbers: [], instance: null, meta: null, attributes: {}, @@ -94,6 +108,9 @@ const defaultState: AnnotationState = { activeLabelID: null, activeObjectType: ObjectType.SHAPE, }, + editing: { + objectState: null, + }, annotations: { activatedStateID: null, activatedElementID: null, @@ -161,6 +178,7 @@ export default (state = defaultState, action: AnyAction): AnnotationState => { job, jobMeta, openTime, + frameNumbers, frameNumber: number, frameFilename: filename, relatedFiles, @@ -177,12 +195,11 @@ export default (state = defaultState, action: AnyAction): 
AnnotationState => { const isReview = job.stage === JobStage.VALIDATION; let workspaceSelected = null; let activeObjectType; - let activeShapeType; + let activeShapeType = null; if (defaultLabel?.type === LabelType.TAG) { activeObjectType = ObjectType.TAG; } else { - activeShapeType = defaultLabel && defaultLabel.type !== 'any' ? - defaultLabel.type : ShapeType.RECTANGLE; + activeShapeType = labelShapeType(defaultLabel); activeObjectType = job.mode === 'interpolation' ? ObjectType.TRACK : ObjectType.SHAPE; } @@ -205,6 +222,7 @@ export default (state = defaultState, action: AnyAction): AnnotationState => { job: { ...state.job, openTime, + frameNumbers, fetching: false, instance: job, meta: jobMeta, @@ -228,6 +246,10 @@ export default (state = defaultState, action: AnyAction): AnnotationState => { annotations: { ...state.annotations, filters, + zLayer: { + ...state.annotations.zLayer, + cur: Number.MAX_SAFE_INTEGER, + }, }, player: { ...state.player, @@ -633,6 +655,26 @@ export default (state = defaultState, action: AnyAction): AnnotationState => { }, }; } + case AnnotationActionTypes.UPDATE_EDITED_STATE: { + const { objectState } = action.payload; + return { + ...state, + editing: { + ...state.editing, + objectState, + }, + }; + } + case AnnotationActionTypes.HIDE_ACTIVE_OBJECT: { + const { hide } = action.payload; + return { + ...state, + canvas: { + ...state.canvas, + activeObjectHidden: hide, + }, + }; + } case AnnotationActionTypes.REMOVE_OBJECT_SUCCESS: { const { objectState, history } = action.payload; const contextMenuClientID = state.canvas.contextMenu.clientID; @@ -980,7 +1022,7 @@ export default (state = defaultState, action: AnyAction): AnnotationState => { } case AnnotationActionTypes.CHANGE_WORKSPACE: { const { workspace } = action.payload; - if (state.canvas.activeControl !== ActiveControl.CURSOR) { + if (state.canvas.activeControl !== ActiveControl.CURSOR && state.workspace !== Workspace.SINGLE_SHAPE) { return state; } @@ -992,6 +1034,11 @@ export default (state = defaultState, action: AnyAction): AnnotationState => { states: state.annotations.states.filter((_state) => !_state.isGroundTruth), activatedStateID: null, activatedAttributeID: null, + + }, + canvas: { + ...state.canvas, + activeControl: ActiveControl.CURSOR, }, }; } diff --git a/cvat-ui/src/reducers/index.ts b/cvat-ui/src/reducers/index.ts index 999d7d6c5419..337ef29927b2 100644 --- a/cvat-ui/src/reducers/index.ts +++ b/cvat-ui/src/reducers/index.ts @@ -8,7 +8,7 @@ import { Canvas, RectDrawingMethod, CuboidDrawingMethod } from 'cvat-canvas-wrap import { Webhook, MLModel, Organization, Job, Task, Project, Label, User, QualityConflict, FramesMetaData, RQStatus, Event, Invitation, SerializedAPISchema, - Request, TargetMetric, ValidationLayout, + Request, JobValidationLayout, QualitySettings, TaskValidationLayout, ObjectState, } from 'cvat-core-wrapper'; import { IntelligentScissors } from 'utils/opencv-wrapper/intelligent-scissors'; import { KeyMap, KeyMapItem } from 'utils/mousetrap-react'; @@ -38,6 +38,7 @@ interface Preview { } export interface ProjectsState { + fetchingTimestamp: number; initialized: boolean; fetching: boolean; count: number; @@ -75,6 +76,7 @@ export interface JobsQuery { } export interface JobsState { + fetchingTimestamp: number; query: JobsQuery; fetching: boolean; count: number; @@ -90,6 +92,7 @@ export interface JobsState { } export interface TasksState { + fetchingTimestamp: number; initialized: boolean; fetching: boolean; moveTask: { @@ -269,14 +272,16 @@ export interface PluginsState { 
qualityControlPage: { overviewTab: ((props: { task: Task; - targetMetric: TargetMetric; + qualitySettings: QualitySettings; }) => JSX.Element)[]; allocationTable: (( props: { task: Task; - gtJob: Job; + gtJobId: number; gtJobMeta: FramesMetaData; + qualitySettings: QualitySettings; + validationLayout: TaskValidationLayout; onDeleteFrames: (frames: number[]) => void; onRestoreFrames: (frames: number[]) => void; }) => JSX.Element)[]; @@ -692,6 +697,10 @@ export enum NavigationType { EMPTY = 'empty', } +export interface EditingState { + objectState: ObjectState | null; +} + export interface AnnotationState { activities: { loads: { @@ -717,6 +726,7 @@ export interface AnnotationState { instance: Canvas | Canvas3d | null; ready: boolean; activeControl: ActiveControl; + activeObjectHidden: boolean; }; job: { openTime: null | number; @@ -724,13 +734,14 @@ export interface AnnotationState { requestedId: number | null; meta: FramesMetaData | null; instance: Job | null | undefined; + frameNumbers: number[]; queryParameters: { initialOpenGuide: boolean; defaultLabel: string | null; defaultPointsCount: number | null; }; groundTruthInfo: { - validationLayout: ValidationLayout | null; + validationLayout: JobValidationLayout | null; groundTruthJobFramesMeta: FramesMetaData | null; groundTruthInstance: Job | null; }, @@ -758,7 +769,7 @@ export interface AnnotationState { drawing: { activeInteractor?: MLModel | OpenCVTool; activeInteractorParameters?: MLModel['params']['canvas']; - activeShapeType: ShapeType; + activeShapeType: ShapeType | null; activeRectDrawingMethod?: RectDrawingMethod; activeCuboidDrawingMethod?: CuboidDrawingMethod; activeNumOfPoints?: number; @@ -766,6 +777,7 @@ export interface AnnotationState { activeObjectType: ObjectType; activeInitialState?: any; }; + editing: EditingState; annotations: { activatedStateID: number | null; activatedElementID: number | null; diff --git a/cvat-ui/src/reducers/jobs-reducer.ts b/cvat-ui/src/reducers/jobs-reducer.ts index c7b07fc1fa30..4be7c5c285f8 100644 --- a/cvat-ui/src/reducers/jobs-reducer.ts +++ b/cvat-ui/src/reducers/jobs-reducer.ts @@ -7,6 +7,7 @@ import { JobsActions, JobsActionTypes } from 'actions/jobs-actions'; import { JobsState } from '.'; const defaultState: JobsState = { + fetchingTimestamp: Date.now(), fetching: false, count: 0, query: { @@ -27,6 +28,7 @@ export default (state: JobsState = defaultState, action: JobsActions): JobsState case JobsActionTypes.GET_JOBS: { return { ...state, + fetchingTimestamp: action.payload.fetchingTimestamp, fetching: true, query: { ...defaultState.query, diff --git a/cvat-ui/src/reducers/notifications-reducer.ts b/cvat-ui/src/reducers/notifications-reducer.ts index 5bc4becb223e..6bb56f50017c 100644 --- a/cvat-ui/src/reducers/notifications-reducer.ts +++ b/cvat-ui/src/reducers/notifications-reducer.ts @@ -5,7 +5,7 @@ import { AnyAction } from 'redux'; -import { ServerError, RequestError } from 'cvat-core-wrapper'; +import { ServerError, RequestError, StorageLocation } from 'cvat-core-wrapper'; import { AuthActionTypes } from 'actions/auth-actions'; import { FormatsActionTypes } from 'actions/formats-actions'; import { ModelsActionTypes } from 'actions/models-actions'; @@ -355,7 +355,7 @@ export default function (state = defaultState, action: AnyAction): Notifications ...state.messages.auth, requestPasswordResetDone: { message: `Check your email for a link to reset your password. 
- If it doesn’t appear within a few minutes, check your spam folder.`, + If it doesn't appear within a few minutes, check your spam folder.`, }, }, }, @@ -546,9 +546,9 @@ export default function (state = defaultState, action: AnyAction): Notifications instance, instanceType, resource, target, } = action.payload; let description = `Export ${resource} for ${instanceType} ${instance.id} is finished. `; - if (target === 'local') { + if (target === StorageLocation.LOCAL) { description += 'You can [download it here](/requests).'; - } else if (target === 'cloudstorage') { + } else if (target === StorageLocation.CLOUD_STORAGE) { description = `Export ${resource} for ${instanceType} ${instance.id} has been uploaded to cloud storage.`; } @@ -590,9 +590,9 @@ export default function (state = defaultState, action: AnyAction): Notifications instance, instanceType, target, } = action.payload; let description = `Backup for the ${instanceType} ${instance.id} is finished. `; - if (target === 'local') { + if (target === StorageLocation.LOCAL) { description += 'You can [download it here](/requests).'; - } else if (target === 'cloudstorage') { + } else if (target === StorageLocation.CLOUD_STORAGE) { description = `Backup for the ${instanceType} ${instance.id} has been uploaded to cloud storage.`; } diff --git a/cvat-ui/src/reducers/projects-reducer.ts b/cvat-ui/src/reducers/projects-reducer.ts index 7c2db7cd80ac..5f74f1c5c620 100644 --- a/cvat-ui/src/reducers/projects-reducer.ts +++ b/cvat-ui/src/reducers/projects-reducer.ts @@ -12,6 +12,7 @@ import { AuthActionTypes } from 'actions/auth-actions'; import { ProjectsState } from '.'; const defaultState: ProjectsState = { + fetchingTimestamp: Date.now(), initialized: false, fetching: false, count: 0, @@ -59,6 +60,7 @@ export default (state: ProjectsState = defaultState, action: AnyAction): Project case ProjectsActionTypes.GET_PROJECTS: return { ...state, + fetchingTimestamp: action.payload.fetchingTimestamp, initialized: false, fetching: true, count: 0, diff --git a/cvat-ui/src/reducers/settings-reducer.ts b/cvat-ui/src/reducers/settings-reducer.ts index 0c662908d767..2a9e5ca79db7 100644 --- a/cvat-ui/src/reducers/settings-reducer.ts +++ b/cvat-ui/src/reducers/settings-reducer.ts @@ -444,8 +444,11 @@ export default (state = defaultState, action: AnyAction): SettingsState => { return { ...state, - imageFilters: filters, + shapes: { + ...state.shapes, + showGroundTruth: false, + }, }; } case AnnotationActionTypes.INTERACT_WITH_CANVAS: { diff --git a/cvat-ui/src/reducers/tasks-reducer.ts b/cvat-ui/src/reducers/tasks-reducer.ts index ce2e88258c0c..b686bbc7f26c 100644 --- a/cvat-ui/src/reducers/tasks-reducer.ts +++ b/cvat-ui/src/reducers/tasks-reducer.ts @@ -12,6 +12,7 @@ import { ProjectsActionTypes } from 'actions/projects-actions'; import { TasksState } from '.'; const defaultState: TasksState = { + fetchingTimestamp: Date.now(), initialized: false, fetching: false, moveTask: { @@ -43,6 +44,7 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState ...state.activities, deletes: {}, }, + fetchingTimestamp: action.payload.fetchingTimestamp, initialized: false, fetching: true, count: 0, diff --git a/cvat-ui/src/utils/environment.ts b/cvat-ui/src/utils/environment.ts index 15a6e4178b62..c757afe8e0c6 100644 --- a/cvat-ui/src/utils/environment.ts +++ b/cvat-ui/src/utils/environment.ts @@ -1,5 +1,5 @@ // Copyright (C) 2020-2022 Intel Corporation -// Copyright (C) 2022 CVAT.ai Corp +// Copyright (C) 2024 CVAT.ai Corporation // // 
SPDX-License-Identifier: MIT @@ -9,23 +9,3 @@ export function isDev(): boolean { return process.env.NODE_ENV === 'development'; } - -export function customWaViewHit(pageName?: string, queryString?: string, hashInfo?: string): void { - const waHitFunctionName = process.env.WA_PAGE_VIEW_HIT; - if (waHitFunctionName) { - const waHitFunction = new Function( - 'pageName', - 'queryString', - 'hashInfo', - `if (typeof ${waHitFunctionName} === 'function') { - ${waHitFunctionName}(pageName, queryString, hashInfo); - }`, - ); - try { - waHitFunction(pageName, queryString, hashInfo); - } catch (error: any) { - // eslint-disable-next-line - console.error(`Web analitycs hit function has failed. ${error.toString()}`); - } - } -} diff --git a/cvat-ui/src/utils/is-able-to-change-frame.ts b/cvat-ui/src/utils/is-able-to-change-frame.ts index b3029c4b2670..d86b6357cd88 100644 --- a/cvat-ui/src/utils/is-able-to-change-frame.ts +++ b/cvat-ui/src/utils/is-able-to-change-frame.ts @@ -17,15 +17,15 @@ export default function isAbleToChangeFrame(frame?: number): boolean { return false; } - const frameInTheJob = true; + let frameInTheJob = true; if (typeof frame === 'number') { if (meta.includedFrames) { // frame argument comes in job coordinates - // hovewer includedFrames contains absolute data values - return meta.includedFrames.includes(meta.getDataFrameNumber(frame - job.startFrame)); + // however includedFrames contains absolute data values + frameInTheJob = meta.includedFrames.includes(meta.getDataFrameNumber(frame - job.startFrame)); } - return frame >= job.startFrame && frame <= job.stopFrame; + frameInTheJob = frame >= job.startFrame && frame <= job.stopFrame; } return canvas.isAbleToChangeFrame() && frameInTheJob && !state.annotation.player.navigationBlocked; diff --git a/cvat-ui/src/utils/mousetrap-react.tsx b/cvat-ui/src/utils/mousetrap-react.tsx index 791760a35bdd..f7adf16930ff 100644 --- a/cvat-ui/src/utils/mousetrap-react.tsx +++ b/cvat-ui/src/utils/mousetrap-react.tsx @@ -64,24 +64,29 @@ export default function GlobalHotKeys(props: Props): JSX.Element { Mousetrap.prototype.stopCallback = function (e: KeyboardEvent, element: Element, combo: string): boolean { if (element.tagName === 'INPUT' || element.tagName === 'SELECT' || element.tagName === 'TEXTAREA') { - // stop for input, select, and textarea + // do not trigger any shortcuts if the focused element is one of [input, select, textarea] return true; } const activeSequences = Object.values(applicationKeyMap).map((keyMap) => [...keyMap.sequences]).flat(); if (activeSequences.some((sequence) => sequence.startsWith(combo))) { + // prevent the default behaviour of the event if one of the active shortcuts may potentially be triggered e?.preventDefault(); } // stop when modals are opened - const someModalsOpened = Array.from( + const anyModalsOpened = Array.from( window.document.getElementsByClassName('ant-modal'), ).some((el) => (el as HTMLElement).style.display !== 'none'); - if (someModalsOpened) { + if (anyModalsOpened) { const modalClosingSequences = ['SWITCH_SHORTCUTS', 'SWITCH_SETTINGS'] .map((key) => [...(applicationKeyMap[key]?.sequences ??
[])]).flat(); - return !modalClosingSequences.includes(combo) && !modalClosingSequences.some((seq) => seq.startsWith(combo)); + + return !modalClosingSequences.some((seq) => { + const seqFragments = seq.split('+'); + return combo.split('+').every((key, i) => seqFragments[i] === key); + }); } return false; diff --git a/cvat-ui/src/utils/opencv-wrapper/opencv-wrapper.ts b/cvat-ui/src/utils/opencv-wrapper/opencv-wrapper.ts index b2d045c32483..cc74252a824f 100644 --- a/cvat-ui/src/utils/opencv-wrapper/opencv-wrapper.ts +++ b/cvat-ui/src/utils/opencv-wrapper/opencv-wrapper.ts @@ -201,7 +201,7 @@ export class OpenCVWrapper { cv.findContours(expanded, contours, hierarchy, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE); for (let i = 0; i < contours.size(); i++) { const contour = contours.get(i); - // substract offset we created when copied source image + // subtract offset we created when copied source image jsContours.push(Array.from(contour.data32S as number[]).map((el) => el - 1)); contour.delete(); } diff --git a/cvat/__init__.py b/cvat/__init__.py index d72cb8e0099c..cd11fa1758cc 100644 --- a/cvat/__init__.py +++ b/cvat/__init__.py @@ -4,6 +4,6 @@ from cvat.utils.version import get_version -VERSION = (2, 22, 0, 'alpha', 0) +VERSION = (2, 24, 1, "alpha", 0) __version__ = get_version(VERSION) diff --git a/cvat/apps/analytics_report/report/create.py b/cvat/apps/analytics_report/report/create.py index e028bb590ed1..8606b7cdc1f3 100644 --- a/cvat/apps/analytics_report/report/create.py +++ b/cvat/apps/analytics_report/report/create.py @@ -70,15 +70,6 @@ def _make_queue_job_id_base(self, obj) -> str: elif isinstance(obj, Job): return f"{self._QUEUE_JOB_PREFIX_JOB}{obj.id}" - @classmethod - def _get_last_report_time(cls, obj): - try: - report = obj.analytics_report - if report: - return report.created_date - except ObjectDoesNotExist: - return None - def schedule_analytics_check_job(self, *, job=None, task=None, project=None, user_id): rq_id = self._make_queue_job_id_base(job or task or project) diff --git a/cvat/apps/analytics_report/rules/analytics_reports.rego b/cvat/apps/analytics_report/rules/analytics_reports.rego index 706d6e701dbe..87910192779b 100644 --- a/cvat/apps/analytics_report/rules/analytics_reports.rego +++ b/cvat/apps/analytics_report/rules/analytics_reports.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/dataset_manager/annotation.py b/cvat/apps/dataset_manager/annotation.py index 32fbfda45c49..4ea10ba9619d 100644 --- a/cvat/apps/dataset_manager/annotation.py +++ b/cvat/apps/dataset_manager/annotation.py @@ -6,7 +6,8 @@ from copy import copy, deepcopy import math -from typing import Container, Optional, Sequence +from collections.abc import Container, Sequence +from typing import Optional import numpy as np from itertools import chain from scipy.optimize import linear_sum_assignment @@ -131,7 +132,8 @@ def filter_track_shapes(shapes): if last_key >= stop and scoped_shapes[-1]['points'] != segment_shapes[-1]['points']: segment_shapes.append(scoped_shapes[-1]) elif scoped_shapes[-1]['keyframe'] and \ - scoped_shapes[-1]['outside']: + scoped_shapes[-1]['outside'] and \ + (len(segment_shapes) == 0 or scoped_shapes[-1]['frame'] > segment_shapes[-1]['frame']): segment_shapes.append(scoped_shapes[-1]) elif stop + 1 < len(interpolated_shapes) and \ interpolated_shapes[stop + 1]['outside']: @@ 
-458,6 +460,7 @@ def _unite_objects(obj0, obj1): def _modify_unmatched_object(self, obj, end_frame): pass + class TrackManager(ObjectManager): def to_shapes(self, end_frame: int, *, included_frames: Optional[Sequence[int]] = None, @@ -927,6 +930,8 @@ def propagate(shape, end_frame, *, included_frames=None): prev_shape = None for shape in sorted(track["shapes"], key=lambda shape: shape["frame"]): curr_frame = shape["frame"] + if included_frames is not None and curr_frame not in included_frames: + continue if prev_shape and end_frame <= curr_frame: # If we exceed the end_frame and there was a previous shape, # we still need to interpolate up to the next keyframe, @@ -950,6 +955,11 @@ def propagate(shape, end_frame, *, included_frames=None): break # The track finishes here if prev_shape: + if ( + curr_frame == prev_shape["frame"] + and dict(shape, id=None, keyframe=None) == dict(prev_shape, id=None, keyframe=None) + ): + continue assert curr_frame > prev_shape["frame"], f"{curr_frame} > {prev_shape['frame']}. Track id: {track['id']}" # Catch invalid tracks # Propagate attributes diff --git a/cvat/apps/dataset_manager/apps.py b/cvat/apps/dataset_manager/apps.py index 3e62d078171c..2d2a03c51645 100644 --- a/cvat/apps/dataset_manager/apps.py +++ b/cvat/apps/dataset_manager/apps.py @@ -7,12 +7,3 @@ class DatasetManagerConfig(AppConfig): name = "cvat.apps.dataset_manager" - - def ready(self) -> None: - from django.conf import settings - - from . import default_settings - - for key in dir(default_settings): - if key.isupper() and not hasattr(settings, key): - setattr(settings, key, getattr(default_settings, key)) diff --git a/cvat/apps/dataset_manager/bindings.py b/cvat/apps/dataset_manager/bindings.py index 35d4b902a53a..8b759f7b6316 100644 --- a/cvat/apps/dataset_manager/bindings.py +++ b/cvat/apps/dataset_manager/bindings.py @@ -8,23 +8,21 @@ import os.path as osp import re import sys +from collections import OrderedDict, defaultdict +from collections.abc import Iterable, Iterator, Mapping, Sequence from functools import reduce from operator import add from pathlib import Path from types import SimpleNamespace -from typing import (Any, Callable, DefaultDict, Dict, Iterable, Iterator, List, Literal, Mapping, - NamedTuple, Optional, OrderedDict, Sequence, Set, Tuple, Union) +from typing import Any, Callable, Literal, NamedTuple, Optional, Union from attrs.converters import to_bool import datumaro as dm import defusedxml.ElementTree as ET import rq from attr import attrib, attrs -from datumaro.components.media import PointCloud -from datumaro.components.environment import Environment -from datumaro.components.extractor import Importer from datumaro.components.format_detection import RejectionReason -from django.db.models import QuerySet +from django.db.models import Prefetch, QuerySet from django.utils import timezone from django.conf import settings @@ -52,7 +50,7 @@ class Attribute(NamedTuple): value: Any @classmethod - def add_prefetch_info(cls, queryset: QuerySet): + def add_prefetch_info(cls, queryset: QuerySet[Label]) -> QuerySet[Label]: assert issubclass(queryset.model, Label) return add_prefetch_fields(queryset, [ @@ -280,11 +278,12 @@ def __init__(self, self._create_callback = create_callback self._MAX_ANNO_SIZE = 30000 self._frame_info = {} - self._frame_mapping: Dict[str, int] = {} + self._frame_mapping: dict[str, int] = {} self._frame_step = db_task.data.get_frame_step() self._db_data: models.Data = db_task.data self._use_server_track_ids = use_server_track_ids self._required_frames = 
included_frames + self._initialized_included_frames: Optional[set[int]] = None self._db_subset = db_task.subset super().__init__(db_task) @@ -536,12 +535,14 @@ def shapes(self): yield self._export_labeled_shape(shape) def get_included_frames(self): - return set( - i for i in self.rel_range - if not self._is_frame_deleted(i) - and not self._is_frame_excluded(i) - and self._is_frame_required(i) - ) + if self._initialized_included_frames is None: + self._initialized_included_frames = set( + i for i in self.rel_range + if not self._is_frame_deleted(i) + and not self._is_frame_excluded(i) + and self._is_frame_required(i) + ) + return self._initialized_included_frames def _is_frame_deleted(self, frame): return frame in self._deleted_frames @@ -858,7 +859,9 @@ def __init__(self, annotation_ir: AnnotationIR, db_task: Task, **kwargs): @staticmethod def meta_for_task(db_task, host, label_mapping=None): - db_segments = db_task.segment_set.all().prefetch_related('job_set') + db_segments = db_task.segment_set.all().prefetch_related( + Prefetch('job_set', models.Job.objects.order_by("pk")) + ) meta = OrderedDict([ ("id", str(db_task.id)), @@ -960,9 +963,9 @@ class LabeledShape: type: str = attrib() frame: int = attrib() label: str = attrib() - points: List[float] = attrib() + points: list[float] = attrib() occluded: bool = attrib() - attributes: List[InstanceLabelData.Attribute] = attrib() + attributes: list[InstanceLabelData.Attribute] = attrib() source: str = attrib(default='manual') group: int = attrib(default=0) rotation: int = attrib(default=0) @@ -970,40 +973,40 @@ class LabeledShape: task_id: int = attrib(default=None) subset: str = attrib(default=None) outside: bool = attrib(default=False) - elements: List['ProjectData.LabeledShape'] = attrib(default=[]) + elements: list['ProjectData.LabeledShape'] = attrib(default=[]) @attrs class TrackedShape: type: str = attrib() frame: int = attrib() - points: List[float] = attrib() + points: list[float] = attrib() occluded: bool = attrib() outside: bool = attrib() keyframe: bool = attrib() - attributes: List[InstanceLabelData.Attribute] = attrib() + attributes: list[InstanceLabelData.Attribute] = attrib() rotation: int = attrib(default=0) source: str = attrib(default='manual') group: int = attrib(default=0) z_order: int = attrib(default=0) label: str = attrib(default=None) track_id: int = attrib(default=0) - elements: List['ProjectData.TrackedShape'] = attrib(default=[]) + elements: list['ProjectData.TrackedShape'] = attrib(default=[]) @attrs class Track: label: str = attrib() - shapes: List['ProjectData.TrackedShape'] = attrib() + shapes: list['ProjectData.TrackedShape'] = attrib() source: str = attrib(default='manual') group: int = attrib(default=0) task_id: int = attrib(default=None) subset: str = attrib(default=None) - elements: List['ProjectData.Track'] = attrib(default=[]) + elements: list['ProjectData.Track'] = attrib(default=[]) @attrs class Tag: frame: int = attrib() label: str = attrib() - attributes: List[InstanceLabelData.Attribute] = attrib() + attributes: list[InstanceLabelData.Attribute] = attrib() source: str = attrib(default='manual') group: int = attrib(default=0) task_id: int = attrib(default=None) @@ -1017,8 +1020,8 @@ class Frame: name: str = attrib() width: int = attrib() height: int = attrib() - labeled_shapes: List[Union['ProjectData.LabeledShape', 'ProjectData.TrackedShape']] = attrib() - tags: List['ProjectData.Tag'] = attrib() + labeled_shapes: list[Union['ProjectData.LabeledShape', 'ProjectData.TrackedShape']] = attrib() + tags: 
list['ProjectData.Tag'] = attrib() task_id: int = attrib(default=None) subset: str = attrib(default=None) @@ -1037,12 +1040,12 @@ def __init__(self, self._host = host self._soft_attribute_import = False self._project_annotation = project_annotation - self._tasks_data: Dict[int, TaskData] = {} - self._frame_info: Dict[Tuple[int, int], Literal["path", "width", "height", "subset"]] = dict() + self._tasks_data: dict[int, TaskData] = {} + self._frame_info: dict[tuple[int, int], Literal["path", "width", "height", "subset"]] = dict() # (subset, path): (task id, frame number) - self._frame_mapping: Dict[Tuple[str, str], Tuple[int, int]] = dict() - self._frame_steps: Dict[int, int] = {} - self.new_tasks: Set[int] = set() + self._frame_mapping: dict[tuple[str, str], tuple[int, int]] = dict() + self._frame_steps: dict[int, int] = {} + self.new_tasks: set[int] = set() self._use_server_track_ids = use_server_track_ids InstanceLabelData.__init__(self, db_project) @@ -1080,12 +1083,12 @@ def _init_tasks(self): subsets = set() for task in self._db_tasks.values(): subsets.add(task.subset) - self._subsets: List[str] = list(subsets) + self._subsets: list[str] = list(subsets) - self._frame_steps: Dict[int, int] = {task.id: task.data.get_frame_step() for task in self._db_tasks.values()} + self._frame_steps: dict[int, int] = {task.id: task.data.get_frame_step() for task in self._db_tasks.values()} def _init_task_frame_offsets(self): - self._task_frame_offsets: Dict[int, int] = dict() + self._task_frame_offsets: dict[int, int] = dict() s = 0 subset = None @@ -1100,7 +1103,7 @@ def _init_task_frame_offsets(self): def _init_frame_info(self): self._frame_info = dict() self._deleted_frames = { (task.id, frame): True for task in self._db_tasks.values() for frame in task.data.deleted_frames } - original_names = DefaultDict[Tuple[str, str], int](int) + original_names = defaultdict[tuple[str, str], int](int) for task in self._db_tasks.values(): defaulted_subset = get_defaulted_subset(task.subset, self._subsets) if hasattr(task.data, 'video'): @@ -1112,7 +1115,10 @@ def _init_frame_info(self): } for frame in range(task.data.size)}) else: self._frame_info.update({(task.id, self.rel_frame_id(task.id, db_image.frame)): { - "path": mangle_image_name(db_image.path, defaulted_subset, original_names), + # do not modify honeypot names since they will be excluded from the dataset + # and their quantity should not affect the validation frame name + "path": mangle_image_name(db_image.path, defaulted_subset, original_names) \ + if not db_image.is_placeholder else db_image.path, "id": db_image.id, "width": db_image.width, "height": db_image.height, @@ -1251,7 +1257,7 @@ def _export_track(self, track: dict, task_id: int, task_size: int, idx: int): ) def group_by_frame(self, include_empty: bool = False): - frames: Dict[Tuple[str, int], ProjectData.Frame] = {} + frames: dict[tuple[str, int], ProjectData.Frame] = {} def get_frame(task_id: int, idx: int) -> ProjectData.Frame: frame_info = self._frame_info[(task_id, idx)] abs_frame = self.abs_frame_id(task_id, idx) @@ -1271,25 +1277,36 @@ def get_frame(task_id: int, idx: int) -> ProjectData.Frame: return frames[(frame_info["subset"], abs_frame)] if include_empty: - for ident in sorted(self._frame_info): - if ident not in self._deleted_frames: - get_frame(*ident) + for task_id, frame in sorted(self._frame_info): + if not self._tasks_data.get(task_id): + self.init_task_data(task_id) + + task_included_frames = self._tasks_data[task_id].get_included_frames() + if frame in task_included_frames: 
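# NOTE: with include_empty=True, a frame is now materialized only when it
# survives the owning task's included-frames filter, so deleted frames,
# excluded frames, and honeypot placeholders no longer produce empty items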
+ get_frame(task_id, frame) + + for task_data in self.task_data: + task: Task = task_data.db_instance - for task in self._db_tasks.values(): anno_manager = AnnotationManager( self._annotation_irs[task.id], dimension=self._annotation_irs[task.id].dimension ) + task_included_frames = task_data.get_included_frames() + for shape in sorted( anno_manager.to_shapes( task.data.size, + included_frames=task_included_frames, include_outside=False, use_server_track_ids=self._use_server_track_ids ), key=lambda shape: shape.get("z_order", 0) ): - if (task.id, shape['frame']) not in self._frame_info or (task.id, shape['frame']) in self._deleted_frames: + if shape['frame'] in task_data.deleted_frames: continue + assert (task.id, shape['frame']) in self._frame_info + if 'track_id' in shape: if shape['outside']: continue @@ -1351,7 +1368,7 @@ def db_project(self): return self._db_project @property - def subsets(self) -> List[str]: + def subsets(self) -> list[str]: return self._subsets @property @@ -1368,23 +1385,33 @@ def soft_attribute_import(self, value: bool): for task_data in self._tasks_data.values(): task_data.soft_attribute_import = value + + def init_task_data(self, task_id: int) -> TaskData: + try: + task = self._db_tasks[task_id] + except KeyError as ex: + raise Exception("There is no such task in the project") from ex + + task_data = TaskData( + annotation_ir=self._annotation_irs[task_id], + db_task=task, + host=self._host, + create_callback=self._task_annotations[task_id].create \ + if self._task_annotations is not None else None, + ) + task_data._MAX_ANNO_SIZE //= len(self._db_tasks) + task_data.soft_attribute_import = self.soft_attribute_import + self._tasks_data[task_id] = task_data + + return task_data + @property def task_data(self): - for task_id, task in self._db_tasks.items(): + for task_id in self._db_tasks.keys(): if task_id in self._tasks_data: yield self._tasks_data[task_id] else: - task_data = TaskData( - annotation_ir=self._annotation_irs[task_id], - db_task=task, - host=self._host, - create_callback=self._task_annotations[task_id].create \ - if self._task_annotations is not None else None, - ) - task_data._MAX_ANNO_SIZE //= len(self._db_tasks) - task_data.soft_attribute_import = self.soft_attribute_import - self._tasks_data[task_id] = task_data - yield task_data + yield self.init_task_data(task_id) @staticmethod def _get_filename(path): @@ -1423,7 +1450,7 @@ def split_dataset(self, dataset: dm.Dataset): subset_dataset: dm.Dataset = dataset.subsets()[task_data.db_instance.subset].as_dataset() yield subset_dataset, task_data - def add_labels(self, labels: List[dict]): + def add_labels(self, labels: list[dict]): attributes = [] _labels = [] for label in labels: @@ -1436,19 +1463,22 @@ def add_task(self, task, files): self._project_annotation.add_task(task, files, self) @attrs(frozen=True, auto_attribs=True) -class ImageSource: +class MediaSource: db_task: Task - is_video: bool = attrib(kw_only=True) -class ImageProvider: - def __init__(self, sources: Dict[int, ImageSource]) -> None: + @property + def is_video(self) -> bool: + return self.db_task.mode == 'interpolation' + +class MediaProvider: + def __init__(self, sources: dict[int, MediaSource]) -> None: self._sources = sources def unload(self) -> None: pass -class ImageProvider2D(ImageProvider): - def __init__(self, sources: Dict[int, ImageSource]) -> None: +class MediaProvider2D(MediaProvider): + def __init__(self, sources: dict[int, MediaSource]) -> None: super().__init__(sources) self._current_source_id = None 
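
The bindings.py hunks above apply one caching idea in three places: get_included_frames() memoizes its result in _initialized_included_frames because exports now query it per frame, the task_data generator builds TaskData objects lazily through init_task_data() on first access, and MediaProvider2D keeps a one-slot source cache (_current_source_id together with the _frame_provider slot set up in this constructor). A minimal sketch of the memoization half, using a hypothetical FrameSource class rather than CVAT's real TaskData API:

from typing import Optional

class FrameSource:
    # hypothetical stand-in for TaskData, illustrating the memoization only
    def __init__(self, size: int, deleted: set[int], honeypots: set[int]):
        self._size = size
        self._deleted = deleted
        self._honeypots = honeypots
        self._included: Optional[set[int]] = None  # None means "not computed yet"

    def get_included_frames(self) -> set[int]:
        # build the set on first call only; later calls reuse the cached result
        if self._included is None:
            self._included = {
                i for i in range(self._size)
                if i not in self._deleted and i not in self._honeypots
            }
        return self._included

source = FrameSource(size=5, deleted={1}, honeypots={3})
assert source.get_included_frames() == {0, 2, 4}
# the second call returns the very same cached set, not a recomputed one
assert source.get_included_frames() is source.get_included_frames()

The same first-access rule drives init_task_data(): splitting _MAX_ANNO_SIZE across tasks and propagating soft_attribute_import happen once per task, and repeated iterations of task_data reuse the cached instances.
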
self._frame_provider = None @@ -1456,7 +1486,7 @@ def __init__(self, sources: Dict[int, ImageSource]) -> None: def unload(self) -> None: self._unload_source() - def get_image_for_frame(self, source_id: int, frame_index: int, **image_kwargs): + def get_media_for_frame(self, source_id: int, frame_index: int, **image_kwargs) -> dm.Image: source = self._sources[source_id] if source.is_video: @@ -1483,7 +1513,7 @@ def image_loader(_): return dm.ByteImage(data=image_loader, **image_kwargs) - def _load_source(self, source_id: int, source: ImageSource) -> None: + def _load_source(self, source_id: int, source: MediaSource) -> None: if self._current_source_id == source_id: return @@ -1498,8 +1528,8 @@ def _unload_source(self) -> None: self._current_source_id = None -class ImageProvider3D(ImageProvider): - def __init__(self, sources: Dict[int, ImageSource]) -> None: +class MediaProvider3D(MediaProvider): + def __init__(self, sources: dict[int, MediaSource]) -> None: super().__init__(sources) self._images_per_source = { source_id: { @@ -1509,7 +1539,7 @@ def __init__(self, sources: Dict[int, ImageSource]) -> None: for source_id, source in sources.items() } - def get_image_for_frame(self, source_id: int, frame_id: int, **image_kwargs): + def get_media_for_frame(self, source_id: int, frame_id: int, **image_kwargs) -> dm.PointCloud: source = self._sources[source_id] point_cloud_path = osp.join( @@ -1519,17 +1549,17 @@ def get_image_for_frame(self, source_id: int, frame_id: int, **image_kwargs): image = self._images_per_source[source_id][frame_id] related_images = [ - path + dm.Image(path=path) for rf in image.related_files.all() for path in [osp.realpath(str(rf.path))] if osp.isfile(path) ] - return point_cloud_path, related_images + return dm.PointCloud(point_cloud_path, extra_images=related_images) -IMAGE_PROVIDERS_BY_DIMENSION = { - DimensionType.DIM_3D: ImageProvider3D, - DimensionType.DIM_2D: ImageProvider2D, +MEDIA_PROVIDERS_BY_DIMENSION: dict[DimensionType, MediaProvider] = { + DimensionType.DIM_3D: MediaProvider3D, + DimensionType.DIM_2D: MediaProvider2D, } class CVATDataExtractorMixin: @@ -1538,21 +1568,21 @@ def __init__(self, *, ): self.convert_annotations = convert_annotations or convert_cvat_anno_to_dm - self._image_provider: Optional[ImageProvider] = None + self._media_provider: Optional[MediaProvider] = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback) -> None: - if self._image_provider: - self._image_provider.unload() + if self._media_provider: + self._media_provider.unload() def categories(self) -> dict: raise NotImplementedError() @staticmethod def _load_categories(labels: list): - categories: Dict[dm.AnnotationType, + categories: dict[dm.AnnotationType, dm.Categories] = {} label_categories = dm.LabelCategories(attributes=['occluded']) @@ -1612,7 +1642,7 @@ def __init__( instance_meta = instance_data.meta[instance_data.META_FIELD] dm.SourceExtractor.__init__( self, - media_type=dm.Image if dimension == DimensionType.DIM_2D else PointCloud, + media_type=dm.Image if dimension == DimensionType.DIM_2D else dm.PointCloud, subset=instance_meta['subset'], ) CVATDataExtractorMixin.__init__(self, **kwargs) @@ -1621,7 +1651,6 @@ def __init__( self._user = self._load_user_info(instance_meta) if dimension == DimensionType.DIM_3D else {} self._dimension = dimension self._format_type = format_type - dm_items = [] is_video = instance_meta['mode'] == 'interpolation' ext = '' @@ -1636,46 +1665,61 @@ def __init__( else: assert False - self._image_provider = 
IMAGE_PROVIDERS_BY_DIMENSION[dimension]( - {0: ImageSource(db_task, is_video=is_video)} + self._media_provider = MEDIA_PROVIDERS_BY_DIMENSION[dimension]( + {0: MediaSource(db_task)} ) + dm_items: list[dm.DatasetItem] = [] for frame_data in instance_data.group_by_frame(include_empty=True): - image_args = { - 'path': frame_data.name + ext, - 'size': (frame_data.height, frame_data.width), - } - + dm_media_args = { 'path': frame_data.name + ext } if dimension == DimensionType.DIM_3D: - dm_image = self._image_provider.get_image_for_frame(0, frame_data.id, **image_args) - elif include_images: - dm_image = self._image_provider.get_image_for_frame(0, frame_data.idx, **image_args) + dm_media: dm.PointCloud = self._media_provider.get_media_for_frame( + 0, frame_data.id, **dm_media_args + ) + + if not include_images: + dm_media_args["extra_images"] = [ + dm.Image(path=osp.basename(image.path)) + for image in dm_media.extra_images + ] + dm_media = dm.PointCloud(**dm_media_args) else: - dm_image = dm.Image(**image_args) + dm_media_args['size'] = (frame_data.height, frame_data.width) + if include_images: + dm_media: dm.Image = self._media_provider.get_media_for_frame( + 0, frame_data.idx, **dm_media_args + ) + else: + dm_media = dm.Image(**dm_media_args) + dm_anno = self._read_cvat_anno(frame_data, instance_meta['labels']) + dm_attributes = {'frame': frame_data.frame} + if dimension == DimensionType.DIM_2D: dm_item = dm.DatasetItem( - id=osp.splitext(frame_data.name)[0], - annotations=dm_anno, media=dm_image, - subset=frame_data.subset, - attributes={'frame': frame_data.frame - }) + id=osp.splitext(frame_data.name)[0], + subset=frame_data.subset, + annotations=dm_anno, + media=dm_media, + attributes=dm_attributes, + ) elif dimension == DimensionType.DIM_3D: - attributes = {'frame': frame_data.frame} if format_type == "sly_pointcloud": - attributes["name"] = self._user["name"] - attributes["createdAt"] = self._user["createdAt"] - attributes["updatedAt"] = self._user["updatedAt"] - attributes["labels"] = [] + dm_attributes["name"] = self._user["name"] + dm_attributes["createdAt"] = self._user["createdAt"] + dm_attributes["updatedAt"] = self._user["updatedAt"] + dm_attributes["labels"] = [] for (idx, (_, label)) in enumerate(instance_meta['labels']): - attributes["labels"].append({"label_id": idx, "name": label["name"], "color": label["color"], "type": label["type"]}) - attributes["track_id"] = -1 + dm_attributes["labels"].append({"label_id": idx, "name": label["name"], "color": label["color"], "type": label["type"]}) + dm_attributes["track_id"] = -1 dm_item = dm.DatasetItem( id=osp.splitext(osp.split(frame_data.name)[-1])[0], - annotations=dm_anno, media=PointCloud(dm_image[0]), related_images=dm_image[1], - attributes=attributes, subset=frame_data.subset, + subset=frame_data.subset, + annotations=dm_anno, + media=dm_media, + attributes=dm_attributes, ) dm_items.append(dm_item) @@ -1705,7 +1749,7 @@ def __init__( **kwargs ): dm.Extractor.__init__( - self, media_type=dm.Image if dimension == DimensionType.DIM_2D else PointCloud + self, media_type=dm.Image if dimension == DimensionType.DIM_2D else dm.PointCloud ) CVATDataExtractorMixin.__init__(self, **kwargs) @@ -1714,59 +1758,71 @@ def __init__( self._dimension = dimension self._format_type = format_type - dm_items: List[dm.DatasetItem] = [] - if self._dimension == DimensionType.DIM_3D or include_images: - self._image_provider = IMAGE_PROVIDERS_BY_DIMENSION[self._dimension]( + self._media_provider = MEDIA_PROVIDERS_BY_DIMENSION[self._dimension]( { - 
task.id: ImageSource(task, is_video=task.mode == 'interpolation') + task.id: MediaSource(task) for task in project_data.tasks } ) - ext_per_task: Dict[int, str] = { + ext_per_task: dict[int, str] = { task.id: TaskFrameProvider.VIDEO_FRAME_EXT if is_video else '' for task in project_data.tasks for is_video in [task.mode == 'interpolation'] } + dm_items: list[dm.DatasetItem] = [] for frame_data in project_data.group_by_frame(include_empty=True): - image_args = { - 'path': frame_data.name + ext_per_task[frame_data.task_id], - 'size': (frame_data.height, frame_data.width), - } + dm_media_args = { 'path': frame_data.name + ext_per_task[frame_data.task_id] } if self._dimension == DimensionType.DIM_3D: - dm_image = self._image_provider.get_image_for_frame( - frame_data.task_id, frame_data.id, **image_args) - elif include_images: - dm_image = self._image_provider.get_image_for_frame( - frame_data.task_id, frame_data.idx, **image_args) + dm_media: dm.PointCloud = self._media_provider.get_media_for_frame( + frame_data.task_id, frame_data.id, **dm_media_args + ) + + if not include_images: + dm_media_args["extra_images"] = [ + dm.Image(path=osp.basename(image.path)) + for image in dm_media.extra_images + ] + dm_media = dm.PointCloud(**dm_media_args) else: - dm_image = dm.Image(**image_args) + dm_media_args['size'] = (frame_data.height, frame_data.width) + if include_images: + dm_media: dm.Image = self._media_provider.get_media_for_frame( + frame_data.task_id, frame_data.idx, **dm_media_args + ) + else: + dm_media = dm.Image(**dm_media_args) + dm_anno = self._read_cvat_anno(frame_data, project_data.meta[project_data.META_FIELD]['labels']) + + dm_attributes = {'frame': frame_data.frame} + if self._dimension == DimensionType.DIM_2D: dm_item = dm.DatasetItem( id=osp.splitext(frame_data.name)[0], - annotations=dm_anno, media=dm_image, + annotations=dm_anno, media=dm_media, subset=frame_data.subset, - attributes={'frame': frame_data.frame} + attributes=dm_attributes, ) - else: - attributes = {'frame': frame_data.frame} + elif self._dimension == DimensionType.DIM_3D: if format_type == "sly_pointcloud": - attributes["name"] = self._user["name"] - attributes["createdAt"] = self._user["createdAt"] - attributes["updatedAt"] = self._user["updatedAt"] - attributes["labels"] = [] + dm_attributes["name"] = self._user["name"] + dm_attributes["createdAt"] = self._user["createdAt"] + dm_attributes["updatedAt"] = self._user["updatedAt"] + dm_attributes["labels"] = [] for (idx, (_, label)) in enumerate(project_data.meta[project_data.META_FIELD]['labels']): - attributes["labels"].append({"label_id": idx, "name": label["name"], "color": label["color"], "type": label["type"]}) - attributes["track_id"] = -1 + dm_attributes["labels"].append({"label_id": idx, "name": label["name"], "color": label["color"], "type": label["type"]}) + dm_attributes["track_id"] = -1 dm_item = dm.DatasetItem( id=osp.splitext(osp.split(frame_data.name)[-1])[0], - annotations=dm_anno, media=PointCloud(dm_image[0]), related_images=dm_image[1], - attributes=attributes, subset=frame_data.subset + annotations=dm_anno, media=dm_media, + subset=frame_data.subset, + attributes=dm_attributes, ) + dm_items.append(dm_item) self._items = dm_items @@ -1828,7 +1884,7 @@ def _clean_display_message(self) -> str: message = "Dataset must contain a file:" + message return re.sub(r' +', " ", message) -def mangle_image_name(name: str, subset: str, names: DefaultDict[Tuple[str, str], int]) -> str: +def mangle_image_name(name: str, subset: str, names: 
defaultdict[tuple[str, str], int]) -> str: name, ext = name.rsplit(osp.extsep, maxsplit=1) if not names[(subset, name)]: @@ -1849,7 +1905,7 @@ def mangle_image_name(name: str, subset: str, names: DefaultDict[Tuple[str, str] i += 1 raise Exception('Cannot mangle image name') -def get_defaulted_subset(subset: str, subsets: List[str]) -> str: +def get_defaulted_subset(subset: str, subsets: list[str]) -> str: if subset: return subset else: @@ -2011,7 +2067,7 @@ def _convert_shape(self, return results - def _convert_shapes(self, shapes: List[CommonData.LabeledShape]) -> Iterable[dm.Annotation]: + def _convert_shapes(self, shapes: list[CommonData.LabeledShape]) -> Iterable[dm.Annotation]: dm_anno = [] self.num_of_tracks = reduce( @@ -2025,7 +2081,7 @@ def _convert_shapes(self, shapes: List[CommonData.LabeledShape]) -> Iterable[dm. return dm_anno - def convert(self) -> List[dm.Annotation]: + def convert(self) -> list[dm.Annotation]: dm_anno = [] dm_anno.extend(self._convert_tags(self.cvat_frame_anno.tags)) dm_anno.extend(self._convert_shapes(self.cvat_frame_anno.labeled_shapes)) @@ -2038,7 +2094,7 @@ def convert_cvat_anno_to_dm( map_label, format_name=None, dimension=DimensionType.DIM_2D -) -> List[dm.Annotation]: +) -> list[dm.Annotation]: converter = CvatToDmAnnotationConverter( cvat_frame_anno=cvat_frame_anno, label_attrs=label_attrs, @@ -2415,18 +2471,27 @@ def load_dataset_data(project_annotation, dataset: dm.Dataset, project_data): project_annotation.add_task(task_fields, dataset_files, project_data) -def detect_dataset(dataset_dir: str, format_name: str, importer: Importer) -> None: +class NoMediaInAnnotationFileError(CvatImportError): + def __str__(self) -> str: + return ( + "Can't import media data from the annotation file. " + "Please upload full dataset as a zip archive." 
+ ) + +def detect_dataset(dataset_dir: str, format_name: str, importer: dm.Importer) -> None: not_found_error_instance = CvatDatasetNotFoundError() - def not_found_error(_, reason, human_message): + def _handle_rejection(format_name: str, reason: RejectionReason, human_message: str) -> None: not_found_error_instance.format_name = format_name not_found_error_instance.reason = reason not_found_error_instance.message = human_message - detection_env = Environment() + detection_env = dm.Environment() detection_env.importers.items.clear() detection_env.importers.register(format_name, importer) - detected = detection_env.detect_dataset(dataset_dir, depth=4, rejection_callback=not_found_error) + detected = detection_env.detect_dataset( + dataset_dir, depth=4, rejection_callback=_handle_rejection + ) if not detected and not_found_error_instance.reason != RejectionReason.detection_unsupported: raise not_found_error_instance diff --git a/cvat/apps/dataset_manager/default_settings.py b/cvat/apps/dataset_manager/default_settings.py deleted file mode 100644 index a4dd53b0f52e..000000000000 --- a/cvat/apps/dataset_manager/default_settings.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2024 CVAT.ai Corporation -# -# SPDX-License-Identifier: MIT - -import os - -DATASET_CACHE_TTL = int(os.getenv("CVAT_DATASET_CACHE_TTL", 60 * 60 * 24)) -"Base lifetime for cached exported datasets, in seconds" - -DATASET_CACHE_LOCK_TIMEOUT = int(os.getenv("CVAT_DATASET_CACHE_LOCK_TIMEOUT", 10)) -"Timeout for cache lock acquiring, in seconds" - -DATASET_EXPORT_LOCKED_RETRY_INTERVAL = int(os.getenv("CVAT_DATASET_EXPORT_LOCKED_RETRY_INTERVAL", 60)) -"Retry interval for cases the export cache lock was unavailable, in seconds" diff --git a/cvat/apps/dataset_manager/formats/coco.py b/cvat/apps/dataset_manager/formats/coco.py index 6d63aeb0360f..1d1a8ce4d0d5 100644 --- a/cvat/apps/dataset_manager/formats/coco.py +++ b/cvat/apps/dataset_manager/formats/coco.py @@ -9,8 +9,9 @@ from datumaro.components.annotation import AnnotationType from datumaro.plugins.coco_format.importer import CocoImporter -from cvat.apps.dataset_manager.bindings import GetCVATDataExtractor, detect_dataset, \ - import_dm_annotations +from cvat.apps.dataset_manager.bindings import ( + GetCVATDataExtractor, NoMediaInAnnotationFileError, import_dm_annotations, detect_dataset +) from cvat.apps.dataset_manager.util import make_zip_archive from .registry import dm_env, exporter, importer @@ -35,6 +36,9 @@ def _import(src_file, temp_dir, instance_data, load_data_callback=None, **kwargs load_data_callback(dataset, instance_data) import_dm_annotations(dataset, instance_data) else: + if load_data_callback: + raise NoMediaInAnnotationFileError() + dataset = Dataset.import_from(src_file.name, 'coco_instances', env=dm_env) import_dm_annotations(dataset, instance_data) @@ -52,6 +56,8 @@ def _export(dst_file, temp_dir, instance_data, save_images=False): def _import(src_file, temp_dir, instance_data, load_data_callback=None, **kwargs): def remove_extra_annotations(dataset): for item in dataset: + # Boxes would have invalid (skeleton) labels, so remove them + # TODO: find a way to import boxes annotations = [ann for ann in item.annotations if ann.type != AnnotationType.bbox] item.annotations = annotations @@ -66,7 +72,9 @@ def remove_extra_annotations(dataset): load_data_callback(dataset, instance_data) import_dm_annotations(dataset, instance_data) else: - dataset = Dataset.import_from(src_file.name, - 'coco_person_keypoints', env=dm_env) + if load_data_callback: + raise 
NoMediaInAnnotationFileError() + + dataset = Dataset.import_from(src_file.name, 'coco_person_keypoints', env=dm_env) remove_extra_annotations(dataset) import_dm_annotations(dataset, instance_data) diff --git a/cvat/apps/dataset_manager/formats/cvat.py b/cvat/apps/dataset_manager/formats/cvat.py index 4651fd398451..fa46b58813bf 100644 --- a/cvat/apps/dataset_manager/formats/cvat.py +++ b/cvat/apps/dataset_manager/formats/cvat.py @@ -9,7 +9,7 @@ from collections import OrderedDict from glob import glob from io import BufferedWriter -from typing import Callable +from typing import Callable, Union from datumaro.components.annotation import (AnnotationType, Bbox, Label, LabelCategories, Points, Polygon, @@ -22,10 +22,16 @@ from datumaro.util.image import Image from defusedxml import ElementTree -from cvat.apps.dataset_manager.bindings import (ProjectData, CommonData, detect_dataset, - get_defaulted_subset, - import_dm_annotations, - match_dm_item) +from cvat.apps.dataset_manager.bindings import ( + NoMediaInAnnotationFileError, + ProjectData, + TaskData, + JobData, + detect_dataset, + get_defaulted_subset, + import_dm_annotations, + match_dm_item +) from cvat.apps.dataset_manager.util import make_zip_archive from cvat.apps.engine.frame_provider import FrameQuality, FrameOutputType, make_frame_provider @@ -1370,7 +1376,7 @@ def dump_project_anno(dst_file: BufferedWriter, project_data: ProjectData, callb callback(dumper, project_data) dumper.close_document() -def dump_media_files(instance_data: CommonData, img_dir: str, project_data: ProjectData = None): +def dump_media_files(instance_data: Union[TaskData, JobData], img_dir: str, project_data: ProjectData = None): frame_provider = make_frame_provider(instance_data.db_instance) ext = '' @@ -1383,9 +1389,11 @@ def dump_media_files(instance_data: CommonData, img_dir: str, project_data: Proj quality=FrameQuality.ORIGINAL, out_type=FrameOutputType.BUFFER, ) + included_frames = instance_data.get_included_frames() + for frame_id, frame in zip(instance_data.rel_range, frames): - if (project_data is not None and (instance_data.db_instance.id, frame_id) in project_data.deleted_frames) \ - or frame_id in instance_data.deleted_frames: + # exclude deleted frames and honeypots + if frame_id not in included_frames: continue frame_name = instance_data.frame_info[frame_id]['path'] if project_data is None \ else project_data.frame_info[(instance_data.db_instance.id, frame_id)]['path'] @@ -1454,4 +1462,7 @@ def _import(src_file, temp_dir, instance_data, load_data_callback=None, **kwargs for p in anno_paths: load_anno(p, instance_data) else: + if load_data_callback: + raise NoMediaInAnnotationFileError() + load_anno(src_file, instance_data) diff --git a/cvat/apps/dataset_manager/formats/datumaro.py b/cvat/apps/dataset_manager/formats/datumaro.py index 090397b7a471..4fc1d246dd47 100644 --- a/cvat/apps/dataset_manager/formats/datumaro.py +++ b/cvat/apps/dataset_manager/formats/datumaro.py @@ -3,43 +3,40 @@ # # SPDX-License-Identifier: MIT +import zipfile from datumaro.components.dataset import Dataset -from datumaro.components.extractor import ItemTransform -from datumaro.util.image import Image -from pyunpack import Archive - -from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor, detect_dataset, - import_dm_annotations) +from cvat.apps.dataset_manager.bindings import ( + GetCVATDataExtractor, import_dm_annotations, NoMediaInAnnotationFileError, detect_dataset +) from cvat.apps.dataset_manager.util import make_zip_archive from cvat.apps.engine.models 
import DimensionType from .registry import dm_env, exporter, importer -class DeleteImagePath(ItemTransform): - def transform_item(self, item): - image = None - if item.has_image and item.image.has_data: - image = Image(data=item.image.data, size=item.image.size) - return item.wrap(image=image, point_cloud='', related_images=[]) - @exporter(name="Datumaro", ext="ZIP", version="1.0") def _export(dst_file, temp_dir, instance_data, save_images=False): - with GetCVATDataExtractor(instance_data=instance_data, include_images=save_images) as extractor: + with GetCVATDataExtractor( + instance_data=instance_data, include_images=save_images + ) as extractor: dataset = Dataset.from_extractors(extractor, env=dm_env) - if not save_images: - dataset.transform(DeleteImagePath) dataset.export(temp_dir, 'datumaro', save_images=save_images) make_zip_archive(temp_dir, dst_file) -@importer(name="Datumaro", ext="ZIP", version="1.0") +@importer(name="Datumaro", ext="JSON, ZIP", version="1.0") def _import(src_file, temp_dir, instance_data, load_data_callback=None, **kwargs): - Archive(src_file.name).extractall(temp_dir) + if zipfile.is_zipfile(src_file): + zipfile.ZipFile(src_file).extractall(temp_dir) - detect_dataset(temp_dir, format_name='datumaro', importer=dm_env.importers.get('datumaro')) - dataset = Dataset.import_from(temp_dir, 'datumaro', env=dm_env) + detect_dataset(temp_dir, format_name='datumaro', importer=dm_env.importers.get('datumaro')) + dataset = Dataset.import_from(temp_dir, 'datumaro', env=dm_env) + else: + if load_data_callback: + raise NoMediaInAnnotationFileError() + + dataset = Dataset.import_from(src_file.name, 'datumaro', env=dm_env) if load_data_callback is not None: load_data_callback(dataset, instance_data) @@ -52,19 +49,22 @@ def _export(dst_file, temp_dir, instance_data, save_images=False): dimension=DimensionType.DIM_3D, ) as extractor: dataset = Dataset.from_extractors(extractor, env=dm_env) - - if not save_images: - dataset.transform(DeleteImagePath) dataset.export(temp_dir, 'datumaro', save_images=save_images) make_zip_archive(temp_dir, dst_file) -@importer(name="Datumaro 3D", ext="ZIP", version="1.0", dimension=DimensionType.DIM_3D) +@importer(name="Datumaro 3D", ext="JSON, ZIP", version="1.0", dimension=DimensionType.DIM_3D) def _import(src_file, temp_dir, instance_data, load_data_callback=None, **kwargs): - Archive(src_file.name).extractall(temp_dir) + if zipfile.is_zipfile(src_file): + zipfile.ZipFile(src_file).extractall(temp_dir) + + detect_dataset(temp_dir, format_name='datumaro', importer=dm_env.importers.get('datumaro')) + dataset = Dataset.import_from(temp_dir, 'datumaro', env=dm_env) + else: + if load_data_callback: + raise NoMediaInAnnotationFileError() - detect_dataset(temp_dir, format_name='datumaro', importer=dm_env.importers.get('datumaro')) - dataset = Dataset.import_from(temp_dir, 'datumaro', env=dm_env) + dataset = Dataset.import_from(src_file.name, 'datumaro', env=dm_env) if load_data_callback is not None: load_data_callback(dataset, instance_data) diff --git a/cvat/apps/dataset_manager/project.py b/cvat/apps/dataset_manager/project.py index 759483b10a06..93ac651cf477 100644 --- a/cvat/apps/dataset_manager/project.py +++ b/cvat/apps/dataset_manager/project.py @@ -4,9 +4,10 @@ # SPDX-License-Identifier: MIT import os +from collections.abc import Mapping from tempfile import TemporaryDirectory import rq -from typing import Any, Callable, List, Mapping, Tuple +from typing import Any, Callable from datumaro.components.errors import DatasetError, 
DatasetImportError, DatasetNotFoundError from django.db import transaction @@ -109,7 +110,7 @@ def split_name(file): project_data.new_tasks.add(db_task.id) project_data.init() - def add_labels(self, labels: List[models.Label], attributes: List[Tuple[str, models.AttributeSpec]] = None): + def add_labels(self, labels: list[models.Label], attributes: list[tuple[str, models.AttributeSpec]] = None): for label in labels: label.project = self.db_project # We need label_id here, so we can't use bulk_create here diff --git a/cvat/apps/dataset_manager/task.py b/cvat/apps/dataset_manager/task.py index 5b72f92a1ebc..83886d7e9cf1 100644 --- a/cvat/apps/dataset_manager/task.py +++ b/cvat/apps/dataset_manager/task.py @@ -13,14 +13,14 @@ from datumaro.components.errors import DatasetError, DatasetImportError, DatasetNotFoundError from django.db import transaction -from django.db.models.query import Prefetch +from django.db.models.query import Prefetch, QuerySet from django.conf import settings from rest_framework.exceptions import ValidationError from cvat.apps.engine import models, serializers from cvat.apps.engine.plugins import plugin_decorator from cvat.apps.engine.log import DatasetLogManager -from cvat.apps.engine.utils import chunked_list +from cvat.apps.engine.utils import take_by from cvat.apps.events.handlers import handle_annotations_change from cvat.apps.profiler import silk_profile @@ -81,9 +81,10 @@ def merge_table_rows(rows, keys_for_merge, field_id): return list(merged_rows.values()) + class JobAnnotation: @classmethod - def add_prefetch_info(cls, queryset): + def add_prefetch_info(cls, queryset: QuerySet[models.Job], prefetch_images: bool = True) -> QuerySet[models.Job]: assert issubclass(queryset.model, models.Job) label_qs = add_prefetch_fields(models.Label.objects.all(), [ @@ -93,6 +94,12 @@ def add_prefetch_info(cls, queryset): ]) label_qs = JobData.add_prefetch_info(label_qs) + task_data_queryset = models.Data.objects.all() + if prefetch_images: + task_data_queryset = task_data_queryset.select_related('video').prefetch_related( + Prefetch('images', queryset=models.Image.objects.order_by('frame')) + ) + return queryset.select_related( 'segment', 'segment__task', @@ -100,28 +107,35 @@ def add_prefetch_info(cls, queryset): 'segment__task__project', 'segment__task__owner', 'segment__task__assignee', - 'segment__task__project__owner', - 'segment__task__project__assignee', - Prefetch('segment__task__data', - queryset=models.Data.objects.select_related('video').prefetch_related( - Prefetch('images', queryset=models.Image.objects.order_by('frame')) - )), + Prefetch('segment__task__data', queryset=task_data_queryset), Prefetch('segment__task__label_set', queryset=label_qs), Prefetch('segment__task__project__label_set', queryset=label_qs), ) - def __init__(self, pk, *, is_prefetched=False, queryset=None): - if queryset is None: - queryset = self.add_prefetch_info(models.Job.objects) + def __init__( + self, + pk, + *, + lock_job_in_db: bool = False, + queryset: QuerySet | None = None, + prefetch_images: bool = False, + db_job: models.Job | None = None + ): + assert db_job is None or lock_job_in_db is False + assert (db_job is None and queryset is None) or prefetch_images is False + assert db_job is None or queryset is None + if db_job is None: + if queryset is None: + queryset = self.add_prefetch_info(models.Job.objects, prefetch_images=prefetch_images) + + if lock_job_in_db: + queryset = queryset.select_for_update() - if is_prefetched: - self.db_job: models.Job = queryset.select_related( - 
'segment__task' - ).select_for_update().get(id=pk) - else: self.db_job: models.Job = get_cached(queryset, pk=int(pk)) + else: + self.db_job: models.Job = db_job db_segment = self.db_job.segment self.start_frame = db_segment.start_frame @@ -516,13 +530,13 @@ def _delete(self, data=None): self.ir_data.shapes = data['shapes'] self.ir_data.tracks = data['tracks'] - for labeledimage_ids_chunk in chunked_list(labeledimage_ids, chunk_size=1000): + for labeledimage_ids_chunk in take_by(labeledimage_ids, chunk_size=1000): self._delete_job_labeledimages(labeledimage_ids_chunk) - for labeledshape_ids_chunk in chunked_list(labeledshape_ids, chunk_size=1000): + for labeledshape_ids_chunk in take_by(labeledshape_ids, chunk_size=1000): self._delete_job_labeledshapes(labeledshape_ids_chunk) - for labeledtrack_ids_chunk in chunked_list(labeledtrack_ids, chunk_size=1000): + for labeledtrack_ids_chunk in take_by(labeledtrack_ids, chunk_size=1000): self._delete_job_labeledtracks(labeledtrack_ids_chunk) deleted_data = { @@ -786,6 +800,7 @@ def import_annotations(self, src_file, importer, **options): self.create(job_data.data.slice(self.start_frame, self.stop_frame).serialize()) + class TaskAnnotation: def __init__(self, pk): self.db_task = models.Task.objects.prefetch_related( @@ -797,8 +812,7 @@ def __init__(self, pk): requested_job_types.append(models.JobType.GROUND_TRUTH) self.db_jobs = ( - models.Job.objects - .select_related("segment") + JobAnnotation.add_prefetch_info(models.Job.objects, prefetch_images=False) .filter(segment__task_id=pk, type__in=requested_job_types) ) @@ -821,14 +835,14 @@ def _patch_data(self, data: Union[AnnotationIR, dict], action: Optional[PatchAct start = db_job.segment.start_frame stop = db_job.segment.stop_frame jobs[jid] = { "start": start, "stop": stop } - splitted_data[jid] = data.slice(start, stop) + splitted_data[jid] = (data.slice(start, stop), db_job) - for jid, job_data in splitted_data.items(): + for jid, (job_data, db_job) in splitted_data.items(): data = AnnotationIR(self.db_task.dimension) if action is None: - data.data = put_job_data(jid, job_data) + data.data = put_job_data(jid, job_data, db_job=db_job) else: - data.data = patch_job_data(jid, job_data, action) + data.data = patch_job_data(jid, job_data, action, db_job=db_job) if data.version > self.ir_data.version: self.ir_data.version = data.version @@ -936,18 +950,18 @@ def delete(self, data=None): self._patch_data(data, PatchAction.DELETE) else: for db_job in self.db_jobs: - delete_job_data(db_job.id) + delete_job_data(db_job.id, db_job=db_job) def init_from_db(self): self.reset() - for db_job in self.db_jobs: + for db_job in self.db_jobs.select_for_update(): if db_job.type == models.JobType.GROUND_TRUTH and not ( self.db_task.data.validation_mode == models.ValidationMode.GT_POOL ): continue - gt_annotation = JobAnnotation(db_job.id, is_prefetched=True) + gt_annotation = JobAnnotation(db_job.id, db_job=db_job) gt_annotation.init_from_db() if gt_annotation.ir_data.version > self.ir_data.version: self.ir_data.version = gt_annotation.ir_data.version @@ -1006,19 +1020,21 @@ def get_job_data(pk): return annotation.data + @silk_profile(name="POST job data") @transaction.atomic -def put_job_data(pk, data): - annotation = JobAnnotation(pk) +def put_job_data(pk, data: AnnotationIR | dict, *, db_job: models.Job | None = None): + annotation = JobAnnotation(pk, db_job=db_job) annotation.put(data) return annotation.data + @silk_profile(name="UPDATE job data") @plugin_decorator @transaction.atomic -def patch_job_data(pk, 
data, action): - annotation = JobAnnotation(pk) +def patch_job_data(pk, data: AnnotationIR | dict, action: PatchAction, *, db_job: models.Job | None = None): + annotation = JobAnnotation(pk, db_job=db_job) if action == PatchAction.CREATE: annotation.create(data) elif action == PatchAction.UPDATE: @@ -1028,12 +1044,14 @@ def patch_job_data(pk, data, action): return annotation.data + @silk_profile(name="DELETE job data") @transaction.atomic -def delete_job_data(pk): - annotation = JobAnnotation(pk) +def delete_job_data(pk, *, db_job: models.Job | None = None): + annotation = JobAnnotation(pk, db_job=db_job) annotation.delete() + def export_job(job_id, dst_file, format_name, server_url=None, save_images=False): # For big tasks dump function may run for a long time and # we dont need to acquire lock after the task has been initialized from DB. @@ -1041,13 +1059,14 @@ def export_job(job_id, dst_file, format_name, server_url=None, save_images=False # more dump request received at the same time: # https://github.com/cvat-ai/cvat/issues/217 with transaction.atomic(): - job = JobAnnotation(job_id) + job = JobAnnotation(job_id, prefetch_images=True, lock_job_in_db=True) job.init_from_db() exporter = make_exporter(format_name) with open(dst_file, 'wb') as f: job.export(f, exporter, host=server_url, save_images=save_images) + @silk_profile(name="GET task data") @transaction.atomic def get_task_data(pk): @@ -1056,6 +1075,7 @@ def get_task_data(pk): return annotation.data + @silk_profile(name="POST task data") @transaction.atomic def put_task_data(pk, data): @@ -1064,6 +1084,7 @@ def put_task_data(pk, data): return annotation.data + @silk_profile(name="UPDATE task data") @transaction.atomic def patch_task_data(pk, data, action): @@ -1077,12 +1098,14 @@ def patch_task_data(pk, data, action): return annotation.data + @silk_profile(name="DELETE task data") @transaction.atomic def delete_task_data(pk): annotation = TaskAnnotation(pk) annotation.delete() + def export_task(task_id, dst_file, format_name, server_url=None, save_images=False): # For big tasks dump function may run for a long time and # we dont need to acquire lock after the task has been initialized from DB. 
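The JobAnnotation rework above replaces the old is_prefetched flag with three explicit keyword arguments: prefetch_images (opt-in image prefetching), lock_job_in_db (SELECT ... FOR UPDATE by id), and db_job (reuse of an already-fetched row). A minimal usage sketch of the two intended call patterns, assuming a configured Django environment, an open transaction, and a hypothetical job_id:

# Sketch only: job_id is a hypothetical primary key; a transaction must be
# open, as in export_job above.
from django.db import transaction

from cvat.apps.dataset_manager.task import JobAnnotation
from cvat.apps.engine import models

job_id = 1  # hypothetical

with transaction.atomic():
    # Export path: fetch by id, prefetch frame images, and lock the row in the DB.
    annotation = JobAnnotation(job_id, prefetch_images=True, lock_job_in_db=True)
    annotation.init_from_db()

    # TaskAnnotation path: hand over a row that is already fetched and locked,
    # so the constructor performs no extra query and no second SELECT ... FOR UPDATE.
    db_job = models.Job.objects.select_for_update().get(pk=job_id)
    annotation = JobAnnotation(db_job.id, db_job=db_job)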
@@ -1097,6 +1120,7 @@ def export_task(task_id, dst_file, format_name, server_url=None, save_images=Fal with open(dst_file, 'wb') as f: task.export(f, exporter, host=server_url, save_images=save_images) + @transaction.atomic def import_task_annotations(src_file, task_id, format_name, conv_mask_to_poly): task = TaskAnnotation(task_id) @@ -1108,9 +1132,10 @@ def import_task_annotations(src_file, task_id, format_name, conv_mask_to_poly): except (DatasetError, DatasetImportError, DatasetNotFoundError) as ex: raise CvatImportError(str(ex)) + @transaction.atomic def import_job_annotations(src_file, job_id, format_name, conv_mask_to_poly): - job = JobAnnotation(job_id) + job = JobAnnotation(job_id, prefetch_images=True) importer = make_importer(format_name) with open(src_file, 'rb') as f: diff --git a/cvat/apps/dataset_manager/tests/test_annotation.py b/cvat/apps/dataset_manager/tests/test_annotation.py index 6687f692a0d3..ca845b156271 100644 --- a/cvat/apps/dataset_manager/tests/test_annotation.py +++ b/cvat/apps/dataset_manager/tests/test_annotation.py @@ -2,14 +2,15 @@ # # SPDX-License-Identifier: MIT -from cvat.apps.dataset_manager.annotation import TrackManager - from unittest import TestCase +from cvat.apps.dataset_manager.annotation import AnnotationIR, TrackManager +from cvat.apps.engine.models import DimensionType + class TrackManagerTest(TestCase): def _check_interpolation(self, track): - interpolated = TrackManager.get_interpolated_shapes(track, 0, 7, '2d') + interpolated = TrackManager.get_interpolated_shapes(track, 0, 7, "2d") self.assertEqual( [ @@ -24,7 +25,7 @@ def _check_interpolation(self, track): [ {k: v for k, v in shape.items() if k in ["frame", "keyframe", "outside"]} for shape in interpolated - ] + ], ) def test_point_interpolation(self): @@ -41,7 +42,7 @@ def test_point_interpolation(self): "type": "points", "occluded": False, "outside": False, - "attributes": [] + "attributes": [], }, { "frame": 2, @@ -49,7 +50,7 @@ def test_point_interpolation(self): "points": [3.0, 4.0, 5.0, 6.0], "type": "points", "occluded": False, - "outside": True + "outside": True, }, { "frame": 4, @@ -57,9 +58,9 @@ def test_point_interpolation(self): "points": [3.0, 4.0, 5.0, 6.0], "type": "points", "occluded": False, - "outside": False + "outside": False, }, - ] + ], } self._check_interpolation(track) @@ -78,7 +79,7 @@ def test_polygon_interpolation(self): "type": "polygon", "occluded": False, "outside": False, - "attributes": [] + "attributes": [], }, { "frame": 2, @@ -86,7 +87,7 @@ def test_polygon_interpolation(self): "points": [3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 4.0, 5.0], "type": "polygon", "occluded": False, - "outside": True + "outside": True, }, { "frame": 4, @@ -94,9 +95,9 @@ def test_polygon_interpolation(self): "points": [3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 4.0, 5.0], "type": "polygon", "occluded": False, - "outside": False + "outside": False, }, - ] + ], } self._check_interpolation(track) @@ -116,7 +117,7 @@ def test_bbox_interpolation(self): "type": "rectangle", "occluded": False, "outside": False, - "attributes": [] + "attributes": [], }, { "frame": 2, @@ -125,7 +126,7 @@ def test_bbox_interpolation(self): "rotation": 0, "type": "rectangle", "occluded": False, - "outside": True + "outside": True, }, { "frame": 4, @@ -134,9 +135,9 @@ def test_bbox_interpolation(self): "rotation": 0, "type": "rectangle", "occluded": False, - "outside": False + "outside": False, }, - ] + ], } self._check_interpolation(track) @@ -156,7 +157,7 @@ def test_line_interpolation(self): "type": "polyline", "occluded": 
False, "outside": False, - "attributes": [] + "attributes": [], }, { "frame": 2, @@ -165,7 +166,7 @@ def test_line_interpolation(self): "rotation": 0, "type": "polyline", "occluded": False, - "outside": True + "outside": True, }, { "frame": 4, @@ -174,9 +175,9 @@ def test_line_interpolation(self): "rotation": 0, "type": "polyline", "occluded": False, - "outside": False + "outside": False, }, - ] + ], } self._check_interpolation(track) @@ -196,7 +197,7 @@ def test_outside_bbox_interpolation(self): "type": "rectangle", "occluded": False, "outside": False, - "attributes": [] + "attributes": [], }, { "frame": 2, @@ -214,9 +215,9 @@ def test_outside_bbox_interpolation(self): "type": "rectangle", "occluded": False, "outside": True, - "attributes": [] - } - ] + "attributes": [], + }, + ], } expected_shapes = [ @@ -228,7 +229,7 @@ def test_outside_bbox_interpolation(self): "occluded": False, "outside": False, "attributes": [], - "keyframe": True + "keyframe": True, }, { "frame": 1, @@ -238,7 +239,7 @@ def test_outside_bbox_interpolation(self): "occluded": False, "outside": False, "attributes": [], - "keyframe": False + "keyframe": False, }, { "frame": 2, @@ -248,7 +249,7 @@ def test_outside_bbox_interpolation(self): "occluded": False, "outside": True, "attributes": [], - "keyframe": True + "keyframe": True, }, { "frame": 4, @@ -258,11 +259,11 @@ def test_outside_bbox_interpolation(self): "occluded": False, "outside": True, "attributes": [], - "keyframe": True - } + "keyframe": True, + }, ] - interpolated_shapes = TrackManager.get_interpolated_shapes(track, 0, 5, '2d') + interpolated_shapes = TrackManager.get_interpolated_shapes(track, 0, 5, "2d") self.assertEqual(expected_shapes, interpolated_shapes) def test_outside_polygon_interpolation(self): @@ -279,7 +280,7 @@ def test_outside_polygon_interpolation(self): "type": "polygon", "occluded": False, "outside": False, - "attributes": [] + "attributes": [], }, { "frame": 2, @@ -287,9 +288,9 @@ def test_outside_polygon_interpolation(self): "type": "polygon", "occluded": False, "outside": True, - "attributes": [] - } - ] + "attributes": [], + }, + ], } expected_shapes = [ @@ -300,7 +301,7 @@ def test_outside_polygon_interpolation(self): "occluded": False, "outside": False, "attributes": [], - "keyframe": True + "keyframe": True, }, { "frame": 1, @@ -309,7 +310,7 @@ def test_outside_polygon_interpolation(self): "occluded": False, "outside": False, "attributes": [], - "keyframe": False + "keyframe": False, }, { "frame": 2, @@ -318,9 +319,98 @@ def test_outside_polygon_interpolation(self): "occluded": False, "outside": True, "attributes": [], - "keyframe": True - } + "keyframe": True, + }, + ] + + interpolated_shapes = TrackManager.get_interpolated_shapes(track, 0, 3, "2d") + self.assertEqual(expected_shapes, interpolated_shapes) + + def test_duplicated_shape_interpolation(self): + # there should not be any new tracks with duplicated shapes, + # but it is possible that the database still contains some + expected_shapes = [ + { + "type": "rectangle", + "occluded": False, + "outside": False, + "points": [100, 100, 200, 200], + "frame": 0, + "attributes": [], + "rotation": 0, + }, + { + "type": "rectangle", + "occluded": False, + "outside": True, + "points": [100, 100, 200, 200], + "frame": 1, + "attributes": [], + "rotation": 0, + }, ] + track = { + "id": 666, + "frame": 0, + "group": None, + "source": "manual", + "attributes": [], + "elements": [], + "label": "cat", + "shapes": expected_shapes + [expected_shapes[-1]], + } - interpolated_shapes = 
TrackManager.get_interpolated_shapes(track, 0, 3, '2d') + interpolated_shapes = TrackManager.get_interpolated_shapes(track, 0, 2, "2d") self.assertEqual(expected_shapes, interpolated_shapes) + + +class AnnotationIRTest(TestCase): + def test_slice_track_does_not_duplicate_outside_frame_on_the_end(self): + track_shapes = [ + { + "type": "rectangle", + "occluded": False, + "outside": False, + "points": [100, 100, 200, 200], + "frame": 0, + "attributes": [], + "rotation": 0, + }, + { + "type": "rectangle", + "occluded": False, + "outside": True, + "points": [100, 100, 200, 200], + "frame": 1, + "attributes": [], + "rotation": 0, + }, + { + "type": "rectangle", + "occluded": False, + "outside": False, + "points": [111, 111, 222, 222], + "frame": 10, + "attributes": [], + "rotation": 0, + }, + ] + data = { + "tags": [], + "shapes": [], + "tracks": [ + { + "id": 666, + "frame": 0, + "group": None, + "source": "manual", + "attributes": [], + "elements": [], + "label": "cat", + "shapes": track_shapes, + } + ], + } + annotation = AnnotationIR(dimension=DimensionType.DIM_2D, data=data) + sliced_annotation = annotation.slice(0, 1) + self.assertEqual(sliced_annotation.data["tracks"][0]["shapes"], track_shapes[0:2]) diff --git a/cvat/apps/dataset_manager/tests/test_formats.py b/cvat/apps/dataset_manager/tests/test_formats.py index 7b9eebd5ab97..91a3081ca089 100644 --- a/cvat/apps/dataset_manager/tests/test_formats.py +++ b/cvat/apps/dataset_manager/tests/test_formats.py @@ -28,6 +28,7 @@ get_paginated_collection, ForceLogin, generate_image_file, ApiTestBase ) + class _DbTestBase(ApiTestBase): def setUp(self): super().setUp() @@ -87,6 +88,7 @@ def _create_task(self, data, image_data): return task + class TaskExportTest(_DbTestBase): def _generate_custom_annotations(self, annotations, task): self._put_api_v2_task_id_annotations(task["id"], annotations) @@ -520,6 +522,72 @@ def test_frames_outside_are_not_generated(self): self.assertTrue(frame.frame in range(6, 10)) self.assertEqual(i + 1, 4) + def _delete_job_frames(self, job_id: int, deleted_frames: list[int]): + with ForceLogin(self.user, self.client): + response = self.client.patch( + f"/api/jobs/{job_id}/data/meta?org=", + data=dict(deleted_frames=deleted_frames), + format="json" + ) + assert response.status_code == status.HTTP_200_OK, response.status_code + + def test_track_keyframes_on_deleted_frames_do_not_affect_later_frames(self): + images = self._generate_task_images(4) + task = self._generate_task(images) + job = self._get_task_jobs(task["id"])[0] + + annotations = { + "version": 0, + "tags": [], + "shapes": [], + "tracks": [ + { + "frame": 0, + "label_id": task["labels"][0]["id"], + "group": None, + "source": "manual", + "attributes": [], + "shapes": [ + { + "frame": 0, + "points": [1, 2, 3, 4], + "type": "rectangle", + "occluded": False, + "outside": False, + "attributes": [], + }, + { + "frame": 1, + "points": [5, 6, 7, 8], + "type": "rectangle", + "occluded": False, + "outside": True, + "attributes": [], + }, + { + "frame": 2, + "points": [9, 10, 11, 12], + "type": "rectangle", + "occluded": False, + "outside": False, + "attributes": [], + }, + ] + }, + ] + } + self._put_api_v2_job_id_annotations(job["id"], annotations) + self._delete_job_frames(job["id"], [2]) + + task_ann = TaskAnnotation(task["id"]) + task_ann.init_from_db() + task_data = TaskData(task_ann.ir_data, Task.objects.get(pk=task["id"])) + extractor = CvatTaskOrJobDataExtractor(task_data) + dm_dataset = Dataset.from_extractors(extractor) + + assert 
len(dm_dataset.get("image_3").annotations) == 0 + + class FrameMatchingTest(_DbTestBase): def _generate_task_images(self, paths): # pylint: disable=no-self-use f = BytesIO() @@ -612,6 +680,7 @@ def test_dataset_root(self): root = find_dataset_root(dataset, task_data) self.assertEqual(expected, root) + class TaskAnnotationsImportTest(_DbTestBase): def _generate_custom_annotations(self, annotations, task): self._put_api_v2_task_id_annotations(task["id"], annotations) diff --git a/cvat/apps/dataset_manager/tests/test_rest_api_formats.py b/cvat/apps/dataset_manager/tests/test_rest_api_formats.py index d60660a55c79..50883826b5a5 100644 --- a/cvat/apps/dataset_manager/tests/test_rest_api_formats.py +++ b/cvat/apps/dataset_manager/tests/test_rest_api_formats.py @@ -27,7 +27,6 @@ from attr import define, field from datumaro.components.dataset import Dataset from datumaro.components.operations import ExactComparator -from datumaro.util.test_utils import TestDir from django.contrib.auth.models import Group, User from PIL import Image from rest_framework import status @@ -35,6 +34,7 @@ import cvat.apps.dataset_manager as dm from cvat.apps.dataset_manager.bindings import CvatTaskOrJobDataExtractor, TaskData from cvat.apps.dataset_manager.task import TaskAnnotation +from cvat.apps.dataset_manager.tests.utils import TestDir from cvat.apps.dataset_manager.util import get_export_cache_lock from cvat.apps.dataset_manager.views import clear_export_cache, export, parse_export_file_path from cvat.apps.engine.models import Task @@ -141,7 +141,7 @@ def setUpTestData(cls): @classmethod def create_db_users(cls): (group_admin, _) = Group.objects.get_or_create(name="admin") - (group_user, _) = Group.objects.get_or_create(name="business") + (group_user, _) = Group.objects.get_or_create(name="user") user_admin = User.objects.create_superuser(username="admin", email="", password="admin") @@ -1318,6 +1318,33 @@ def get(self) -> str: class _LockTimeoutError(Exception): pass + def setUp(self): + self.export_cache_lock = multiprocessing.Lock() + + @contextmanager + def patched_get_export_cache_lock(self, export_path, *, ttl: int | timedelta, block: bool = True, acquire_timeout: int | timedelta): + # fakeredis lock acquired in a subprocess won't be visible to other processes + # just implement the lock here + from cvat.apps.dataset_manager.util import LockNotAvailableError + + assert acquire_timeout + assert ttl + + if isinstance(acquire_timeout, timedelta): + acquire_timeout = acquire_timeout.total_seconds() + + acquired = self.export_cache_lock.acquire( + block=block, timeout=acquire_timeout + ) + + if not acquired: + raise LockNotAvailableError + + try: + yield + finally: + self.export_cache_lock.release() + @overload @classmethod def set_condition(cls, var: SharedBool, value: bool = True): ... 
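The wait_condition change in the hunk below fixes a lost-wakeup race: if a worker sets the shared flag (and notifies) before the waiter reaches wait(), the notification is gone, so the waiter would block for the whole timeout and raise spuriously. Checking the flag first, while holding the condition lock, closes that window. A self-contained sketch of the pattern follows; the SharedBool internals shown here are an assumption for illustration, not the actual test helper:

import multiprocessing

class SharedBool:
    # Assumed shape of the shared-flag helper: a boolean Value guarded by a
    # Condition, so set() and wait_condition() can synchronize across processes.
    def __init__(self):
        self.condition = multiprocessing.Condition()
        self._value = multiprocessing.Value("b", 0)

    def set(self, value: bool = True) -> None:
        with self.condition:
            self._value.value = int(value)
            self.condition.notify_all()

    def get(self) -> bool:
        return bool(self._value.value)

def wait_condition(var: SharedBool, timeout: int = 5) -> None:
    with var.condition:
        # Check the flag before waiting: if it was already set, the notify has
        # already fired and condition.wait() alone would always time out.
        if not var.get() and not var.condition.wait(timeout):
            raise TimeoutError("condition was not set in time")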
@@ -1340,7 +1367,7 @@ def set_condition(cls, var: SharedBase, value: Any = _not_set): @classmethod def wait_condition(cls, var: SharedBase, timeout: Optional[int] = 5): with var.condition: - if not var.condition.wait(timeout): + if not var.get() and not var.condition.wait(timeout): raise cls._LockTimeoutError @staticmethod @@ -1387,6 +1414,20 @@ def process_closing(process: multiprocessing.Process, *, timeout: Optional[int] process.join(timeout=timeout) process.close() + def _setup_task_with_annotations( + self, + *, + number_of_images: int = 3, + format_name: str | None = None, + name_ann: str | None = None, + ): + assert format_name or name_ann + images = self._generate_task_images(number_of_images) + task = self._create_task(tasks["main"], images) + self._create_annotations(task, name_ann or f"{format_name} many jobs", "default") + + return task + def test_concurrent_export_and_cleanup(self): side_effect = self.side_effect chain_side_effects = self.chain_side_effects @@ -1397,195 +1438,188 @@ def test_concurrent_export_and_cleanup(self): format_name = "CVAT for images 1.1" - export_cache_lock = multiprocessing.Lock() - - export_checked_the_file = self.SharedBool() - export_created_the_file = self.SharedBool() export_file_path = self.SharedString() + export_checked_the_file = self.SharedBool() + clear_has_been_finished = self.SharedBool() clear_removed_the_file = self.SharedBool() + export_outdated_after = timedelta(seconds=1) - @contextmanager - def patched_get_export_cache_lock(export_path, *, ttl, block=True, acquire_timeout=None): - # fakeredis lock acquired in a subprocess won't be visible to other processes - # just implement the lock here - from cvat.apps.dataset_manager.util import LockNotAvailableError - - if isinstance(acquire_timeout, timedelta): - acquire_timeout = acquire_timeout.total_seconds() - if acquire_timeout is None: - acquire_timeout = -1 - - acquired = export_cache_lock.acquire( - block=block, - timeout=acquire_timeout if acquire_timeout > -1 else None - ) - - if not acquired: - raise LockNotAvailableError - - try: - yield - finally: - export_cache_lock.release() + EXPORT_CACHE_LOCK_TTL = 4 + EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT = EXPORT_CACHE_LOCK_TTL * 2 def _export(*_, task_id: int): - from os.path import exists as original_exists - from os import replace as original_replace - from cvat.apps.dataset_manager.views import log_exception as original_log_exception import sys + from os import replace as original_replace + from os.path import exists as original_exists + from cvat.apps.dataset_manager.task import export_task as original_export_task - def os_replace_dst_recorder(_: str, dst: str): - set_condition(export_file_path, dst) - return MOCK_DEFAULT + from cvat.apps.dataset_manager.views import log_exception as original_log_exception def patched_log_exception(logger=None, exc_info=True): cur_exc_info = sys.exc_info() if exc_info is True else exc_info - if cur_exc_info and cur_exc_info[1] and isinstance(cur_exc_info[1], _LockTimeoutError): - return # don't spam in logs with expected errors + if ( + cur_exc_info + and cur_exc_info[1] + and isinstance(cur_exc_info[1], _LockTimeoutError) + ): + return # don't spam in logs with expected errors original_log_exception(logger, exc_info) with ( - patch('cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_TIMEOUT', new=5), + patch("cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_TTL", new=EXPORT_CACHE_LOCK_TTL), + patch("cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT", + 
new=EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT), patch( - 'cvat.apps.dataset_manager.views.get_export_cache_lock', - new=patched_get_export_cache_lock + "cvat.apps.dataset_manager.views.get_export_cache_lock", + new=self.patched_get_export_cache_lock, ), - patch('cvat.apps.dataset_manager.views.osp.exists') as mock_osp_exists, - patch('cvat.apps.dataset_manager.views.os.replace') as mock_os_replace, - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), - patch('cvat.apps.dataset_manager.views.log_exception', new=patched_log_exception), + # We need to mock the function directly imported into the module + # to ensure that the `export_checked_the_file` condition is set + # only after checking whether a file exists inside an acquired lock + patch("cvat.apps.dataset_manager.views.osp_exists") as mock_osp_exists, + patch( + "cvat.apps.dataset_manager.views.os.replace", side_effect=original_replace + ) as mock_os_replace, + patch("cvat.apps.dataset_manager.views.log_exception", new=patched_log_exception), + patch("cvat.apps.dataset_manager.views.task.export_task") as mock_export_fn, ): mock_osp_exists.side_effect = chain_side_effects( original_exists, side_effect(set_condition, export_checked_the_file), ) - - mock_os_replace.side_effect = chain_side_effects( - original_replace, - os_replace_dst_recorder, - side_effect(set_condition, export_created_the_file), - side_effect(wait_condition, clear_removed_the_file), + mock_export_fn.side_effect = chain_side_effects( + original_export_task, + side_effect(wait_condition, clear_has_been_finished), ) - - mock_rq_get_current_job.return_value = MagicMock(timeout=5) - - exited_by_timeout = False - try: - export(dst_format=format_name, task_id=task_id) - except _LockTimeoutError: - # should come from waiting for clear_removed_the_file - exited_by_timeout = True - - assert exited_by_timeout - mock_os_replace.assert_called_once() - + result_file = export(dst_format=format_name, task_id=task_id) + set_condition(export_file_path, result_file) + mock_os_replace.assert_not_called() def _clear(*_, file_path: str, file_ctime: str): from os import remove as original_remove - from cvat.apps.dataset_manager.util import LockNotAvailableError + + from cvat.apps.dataset_manager.views import FileIsBeingUsedError with ( - patch('cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_TIMEOUT', new=5), + patch("cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_TTL", new=EXPORT_CACHE_LOCK_TTL), + patch("cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT", new=EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT), + patch( + "cvat.apps.dataset_manager.views.get_export_cache_lock", + new=self.patched_get_export_cache_lock, + ), patch( - 'cvat.apps.dataset_manager.views.get_export_cache_lock', - new=patched_get_export_cache_lock + "cvat.apps.dataset_manager.views.os.remove" + ) as mock_os_remove, + patch( + "cvat.apps.dataset_manager.views.rq.get_current_job" + ) as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), + patch( + "cvat.apps.dataset_manager.views.TTL_CONSTS", + new={"task": export_outdated_after}, ), - patch('cvat.apps.dataset_manager.views.os.remove') as mock_os_remove, - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), - patch('cvat.apps.dataset_manager.views.TTL_CONSTS', new={'task': timedelta(seconds=0)}), ): + 
mock_rq_get_current_job.return_value = MagicMock(timeout=5) mock_os_remove.side_effect = chain_side_effects( - side_effect(wait_condition, export_created_the_file), original_remove, side_effect(set_condition, clear_removed_the_file), ) - mock_rq_get_current_job.return_value = MagicMock(timeout=5) - - exited_by_timeout = False try: clear_export_cache( file_path=file_path, file_ctime=file_ctime, logger=MagicMock() ) - except LockNotAvailableError: - # should come from waiting for get_export_cache_lock - exited_by_timeout = True - - assert exited_by_timeout + except FileIsBeingUsedError: + set_condition(clear_has_been_finished) + mock_os_remove.assert_not_called() # The problem checked is TOCTOU / race condition for file existence check and - # further file creation / removal. There are several possible variants of the problem. + # further file update / removal. There are several possible variants of the problem. # An example: - # 1. export checks the file exists, but outdated + # 1. export checks the file exists -> file is not outdated -> need to touch file's updated_date # 2. clear checks the file exists, and matches the creation timestamp - # 3. export creates the new export file - # 4. remove removes the new export file (instead of the one that it checked) + # 3. export updates the file's modification date and does not run the actual export + # 4. remove removes the actual export file # Thus, we have no exported file after the successful export. - # + + # note: the situation where the clear process deletes a file newly + # re-created by the export process instead of the checked one is not + # possible, since file names contain a timestamp. + # Other variants can be variations on the intermediate calls, such as getmtime: # - export: exists() # - clear: remove() # - export: getmtime() -> an exception + + # - clear_1: exists() + # - clear_2: remove() + # - clear_1: getmtime() -> an exception # etc.
- images = self._generate_task_images(3) - task = self._create_task(tasks["main"], images) - self._create_annotations(task, f'{format_name} many jobs', "default") + task = self._setup_task_with_annotations(format_name=format_name) task_id = task["id"] with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), ): mock_rq_job = MagicMock(timeout=5) mock_rq_get_current_job.return_value = mock_rq_job + # create a file in the export cache first_export_path = export(dst_format=format_name, task_id=task_id) - export_instance_timestamp = parse_export_file_path(first_export_path).instance_timestamp + initial_file_modification_time = os.path.getmtime(first_export_path) + # make sure that the file in the export cache is outdated + # and would have to be deleted if the export was not running in parallel + sleep(export_outdated_after.seconds + 1) - self._create_annotations(task, f'{format_name} many jobs', "default") + export_instance_timestamp = parse_export_file_path(first_export_path).instance_timestamp processes_finished_correctly = False with ExitStack() as es: # Run both operations concurrently # Threads could be faster, but they can't be terminated - export_process = es.enter_context(process_closing(multiprocessing.Process( - target=_export, - args=( - export_cache_lock, - export_checked_the_file, export_created_the_file, - export_file_path, clear_removed_the_file, - ), - kwargs=dict(task_id=task_id), - ))) - clear_process = es.enter_context(process_closing(multiprocessing.Process( - target=_clear, - args=( - export_cache_lock, - export_checked_the_file, export_created_the_file, - export_file_path, clear_removed_the_file, - ), - kwargs=dict(file_path=first_export_path, file_ctime=export_instance_timestamp), - ))) + export_process = es.enter_context( + process_closing( + multiprocessing.Process( + target=_export, + args=( + self.export_cache_lock, + export_checked_the_file, + ), + kwargs=dict(task_id=task_id), + ) + ) + ) + clear_process = es.enter_context( + process_closing( + multiprocessing.Process( + target=_clear, + args=( + self.export_cache_lock, + export_checked_the_file, + ), + kwargs=dict( + file_path=first_export_path, file_ctime=export_instance_timestamp + ), + ) + ) + ) export_process.start() - wait_condition(export_checked_the_file) # ensure the expected execution order + wait_condition(export_checked_the_file)  # ensure the expected execution order clear_process.start() # A deadlock (interrupted by a timeout error) is the positive outcome in this test, # if the problem is fixed. # clear() must wait for the export cache lock release (acquired by export()).
# It must be finished by a timeout, as export() holds it, waiting - clear_process.join(timeout=10) - - # export() must wait for the clear() file existence check and fail because of timeout - export_process.join(timeout=10) + clear_process.join(timeout=15) + export_process.join(timeout=15) self.assertFalse(export_process.is_alive()) self.assertFalse(clear_process.is_alive()) @@ -1598,17 +1632,17 @@ def _clear(*_, file_path: str, file_ctime: str): processes_finished_correctly = True self.assertTrue(processes_finished_correctly) - - # terminate() may break the locks, don't try to acquire - # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Process.terminate - self.assertTrue(export_checked_the_file.get()) - self.assertTrue(export_created_the_file.get()) - self.assertFalse(clear_removed_the_file.get()) new_export_path = export_file_path.get() self.assertGreater(len(new_export_path), 0) self.assertTrue(osp.isfile(new_export_path)) + self.assertTrue(osp.isfile(first_export_path)) + self.assertGreater(os.path.getmtime(first_export_path), initial_file_modification_time) + + # terminate() may break the locks, don't try to acquire + # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Process.terminate + self.assertTrue(export_checked_the_file.get()) def test_concurrent_download_and_cleanup(self): side_effect = self.side_effect @@ -1619,14 +1653,10 @@ def test_concurrent_download_and_cleanup(self): format_name = "CVAT for images 1.1" - export_cache_lock = multiprocessing.Lock() - download_checked_the_file = self.SharedBool() clear_removed_the_file = self.SharedBool() - images = self._generate_task_images(3) - task = self._create_task(tasks["main"], images) - self._create_annotations(task, f'{format_name} many jobs', "default") + task = self._setup_task_with_annotations(format_name=format_name) task_id = task["id"] download_url = self._generate_url_dump_tasks_annotations(task_id) @@ -1634,30 +1664,6 @@ def test_concurrent_download_and_cleanup(self): "format": format_name, } - @contextmanager - def patched_get_export_cache_lock(export_path, *, ttl, block=True, acquire_timeout=None): - # fakeredis lock acquired in a subprocess won't be visible to other processes - # just implement the lock here - from cvat.apps.dataset_manager.util import LockNotAvailableError - - if isinstance(acquire_timeout, timedelta): - acquire_timeout = acquire_timeout.total_seconds() - if acquire_timeout is None: - acquire_timeout = -1 - - acquired = export_cache_lock.acquire( - block=block, - timeout=acquire_timeout if acquire_timeout > -1 else None - ) - - if not acquired: - raise LockNotAvailableError - - try: - yield - finally: - export_cache_lock.release() - def _download(*_, task_id: int, export_path: str): from os.path import exists as original_exists @@ -1668,16 +1674,16 @@ def patched_osp_exists(path: str): set_condition(download_checked_the_file) wait_condition( clear_removed_the_file, timeout=20 - ) # wait more than the process timeout + )  # wait more than the process timeout return result with ( patch( - 'cvat.apps.engine.views.dm.util.get_export_cache_lock', - new=patched_get_export_cache_lock + "cvat.apps.engine.views.dm.util.get_export_cache_lock", + new=self.patched_get_export_cache_lock, ), - patch('cvat.apps.dataset_manager.views.osp.exists') as mock_osp_exists, + patch("cvat.apps.dataset_manager.views.osp.exists") as mock_osp_exists, TemporaryDirectory() as temp_dir, ): mock_osp_exists.side_effect = patched_osp_exists @@ -1691,18 +1697,23 @@ def patched_osp_exists(path: 
str): def _clear(*_, file_path: str, file_ctime: str): from os import remove as original_remove + from cvat.apps.dataset_manager.util import LockNotAvailableError with ( - patch('cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_TIMEOUT', new=5), + patch("cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT", new=3), + patch( + "cvat.apps.dataset_manager.views.get_export_cache_lock", + new=self.patched_get_export_cache_lock, + ), + patch("cvat.apps.dataset_manager.views.os.remove") as mock_os_remove, patch( - 'cvat.apps.dataset_manager.views.get_export_cache_lock', - new=patched_get_export_cache_lock + "cvat.apps.dataset_manager.views.rq.get_current_job" + ) as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), + patch( + "cvat.apps.dataset_manager.views.TTL_CONSTS", new={"task": timedelta(seconds=0)} ), - patch('cvat.apps.dataset_manager.views.os.remove') as mock_os_remove, - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), - patch('cvat.apps.dataset_manager.views.TTL_CONSTS', new={'task': timedelta(seconds=0)}), ): mock_os_remove.side_effect = chain_side_effects( original_remove, @@ -1722,7 +1733,6 @@ def _clear(*_, file_path: str, file_ctime: str): assert exited_by_timeout - # The problem checked is TOCTOU / race condition for file existence check and # further file reading / removal. There are several possible variants of the problem. # An example: @@ -1748,7 +1758,7 @@ def patched_export(*args, **kwargs): return result - with patch('cvat.apps.dataset_manager.views.export', new=patched_export): + with patch("cvat.apps.dataset_manager.views.export", new=patched_export): response = self._get_request_with_data(download_url, download_params, self.admin) self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) @@ -1763,27 +1773,35 @@ def patched_export(*args, **kwargs): with ExitStack() as es: # Run both operations concurrently # Threads could be faster, but they can't be terminated - download_process = es.enter_context(process_closing(multiprocessing.Process( - target=_download, - args=(download_checked_the_file, clear_removed_the_file, export_cache_lock), - kwargs=dict(task_id=task_id, export_path=export_path), - ))) - clear_process = es.enter_context(process_closing(multiprocessing.Process( - target=_clear, - args=(download_checked_the_file, clear_removed_the_file, export_cache_lock), - kwargs=dict(file_path=export_path, file_ctime=export_instance_time), - ))) + download_process = es.enter_context( + process_closing( + multiprocessing.Process( + target=_download, + args=(download_checked_the_file, clear_removed_the_file), + kwargs=dict(task_id=task_id, export_path=export_path), + ) + ) + ) + clear_process = es.enter_context( + process_closing( + multiprocessing.Process( + target=_clear, + args=(download_checked_the_file, clear_removed_the_file), + kwargs=dict(file_path=export_path, file_ctime=export_instance_time), + ) + ) + ) download_process.start() - wait_condition(download_checked_the_file) # ensure the expected execution order + wait_condition(download_checked_the_file) # ensure the expected execution order clear_process.start() # A deadlock (interrupted by a timeout error) is the positive outcome in this test, # if the problem is fixed. # clear() must wait for the export cache lock release (acquired by download()). 
# It must be finished by a timeout, as download() holds it, waiting - clear_process.join(timeout=5) + clear_process.join(timeout=10) # download() must wait for the clear() file existence check and fail because of timeout download_process.join(timeout=5) @@ -1796,7 +1814,7 @@ def patched_export(*args, **kwargs): # All the expected exceptions should be handled in the process callbacks. # This is to avoid passing the test with unexpected errors - self.assertEqual(download_process.exitcode, -15) # sigterm + self.assertEqual(download_process.exitcode, -15) # sigterm self.assertEqual(clear_process.exitcode, 0) processes_finished_correctly = True @@ -1811,15 +1829,15 @@ def patched_export(*args, **kwargs): def test_export_can_create_file_and_cleanup_job(self): format_name = "CVAT for images 1.1" - images = self._generate_task_images(3) - task = self._create_task(tasks["main"], images) - self._create_annotations(task, f'{format_name} many jobs', "default") + task = self._setup_task_with_annotations(format_name=format_name) task_id = task["id"] with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler') as mock_rq_get_scheduler, - patch('cvat.apps.dataset_manager.views.TTL_CONSTS', new={'task': timedelta(seconds=0)}), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch( + "cvat.apps.dataset_manager.views.django_rq.get_scheduler" + ) as mock_rq_get_scheduler, + patch("cvat.apps.dataset_manager.views.TTL_CONSTS", new={"task": timedelta(seconds=0)}), ): mock_rq_job = MagicMock(timeout=5) mock_rq_get_current_job.return_value = mock_rq_job @@ -1837,24 +1855,23 @@ def test_export_cache_lock_can_raise_on_releasing_expired_lock(self): with self.assertRaises(ReleaseUnlockedLock): lock_time = 2 - with get_export_cache_lock('test_export_path', ttl=lock_time, acquire_timeout=5): + with get_export_cache_lock("test_export_path", ttl=lock_time, acquire_timeout=5): sleep(lock_time + 1) def test_export_can_request_retry_on_locking_failure(self): format_name = "CVAT for images 1.1" - images = self._generate_task_images(3) - task = self._create_task(tasks["main"], images) - self._create_annotations(task, f'{format_name} many jobs', "default") + task = self._setup_task_with_annotations(format_name=format_name) task_id = task["id"] from cvat.apps.dataset_manager.util import LockNotAvailableError + with ( patch( - 'cvat.apps.dataset_manager.views.get_export_cache_lock', - side_effect=LockNotAvailableError + "cvat.apps.dataset_manager.views.get_export_cache_lock", + side_effect=LockNotAvailableError, ) as mock_get_export_cache_lock, - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), self.assertRaises(LockNotAvailableError), ): mock_rq_job = MagicMock(timeout=5) @@ -1867,25 +1884,26 @@ def test_export_can_request_retry_on_locking_failure(self): def test_export_can_reuse_older_file_if_still_relevant(self): format_name = "CVAT for images 1.1" - images = self._generate_task_images(3) - task = self._create_task(tasks["main"], images) - self._create_annotations(task, f'{format_name} many jobs', "default") + task = self._setup_task_with_annotations(format_name=format_name) task_id = task["id"] with ( - 
patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), ): mock_rq_get_current_job.return_value = MagicMock(timeout=5) first_export_path = export(dst_format=format_name, task_id=task_id) from os.path import exists as original_exists + with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), - patch('cvat.apps.dataset_manager.views.osp.exists', side_effect=original_exists) as mock_osp_exists, - patch('cvat.apps.dataset_manager.views.os.replace') as mock_os_replace, + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), + patch( + "cvat.apps.dataset_manager.views.osp_exists", side_effect=original_exists + ) as mock_osp_exists, + patch("cvat.apps.dataset_manager.views.os.replace") as mock_os_replace, ): mock_rq_get_current_job.return_value = MagicMock(timeout=5) @@ -1895,25 +1913,176 @@ def test_export_can_reuse_older_file_if_still_relevant(self): mock_osp_exists.assert_called_with(first_export_path) mock_os_replace.assert_not_called() + def test_initiate_concurrent_export_by_different_users(self): + side_effect = self.side_effect + chain_side_effects = self.chain_side_effects + process_closing = self.process_closing + wait_condition = self.wait_condition + set_condition = self.set_condition + + export_1_checked_file = self.SharedBool() + export_1_made_export = self.SharedBool() + export_1_replaced_file = self.SharedBool() + + export_2_checked_file = self.SharedBool() + export_2_made_export = self.SharedBool() + export_2_replaced_file = self.SharedBool() + + format_name = "CVAT for images 1.1" + + LOCK_TTL = 4 + LOCK_ACQUISITION_TIMEOUT = LOCK_TTL * 2 + + def _export_1( + *_, + task_id: int, + result_queue: multiprocessing.Queue, + ): + from os import replace as original_replace + + from cvat.apps.dataset_manager.task import export_task as original_export_task + + with ( + patch("cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_TTL", new=LOCK_TTL), + patch( + "cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT", + new=LOCK_ACQUISITION_TIMEOUT, + ), + patch( + "cvat.apps.dataset_manager.views.get_export_cache_lock", + new=self.patched_get_export_cache_lock, + ), + patch("cvat.apps.dataset_manager.views.os.replace") as mock_os_replace, + patch("cvat.apps.dataset_manager.views.task.export_task") as mock_export_fn, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), + ): + mock_export_fn.side_effect = chain_side_effects( + side_effect(set_condition, export_1_checked_file), + original_export_task, + side_effect(wait_condition, export_2_checked_file), + side_effect(set_condition, export_1_made_export), + ) + + mock_os_replace.side_effect = chain_side_effects( + original_replace, + side_effect(set_condition, export_1_replaced_file), + ) + result_file_path = export(dst_format=format_name, task_id=task_id) + result_queue.put(result_file_path) + + mock_export_fn.assert_called_once() + mock_os_replace.assert_called_once() + + def _export_2( + *_, + task_id: int, + result_queue: multiprocessing.Queue, + ): + from os import replace as original_replace + + from cvat.apps.dataset_manager.task import export_task 
as original_export_task + + with ( + patch("cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_TTL", new=LOCK_TTL), + patch( + "cvat.apps.dataset_manager.views.EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT", + new=LOCK_ACQUISITION_TIMEOUT, + ), + patch( + "cvat.apps.dataset_manager.views.get_export_cache_lock", + new=self.patched_get_export_cache_lock, + ), + patch("cvat.apps.dataset_manager.views.os.replace") as mock_os_replace, + patch("cvat.apps.dataset_manager.views.task.export_task") as mock_export_fn, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), + ): + mock_export_fn.side_effect = chain_side_effects( + side_effect(set_condition, export_2_checked_file), + original_export_task, + side_effect(wait_condition, export_1_replaced_file), + side_effect(set_condition, export_2_made_export), + ) + + mock_os_replace.side_effect = chain_side_effects( + original_replace, + side_effect(set_condition, export_2_replaced_file), + ) + result_file_path = export(dst_format=format_name, task_id=task_id) + result_queue.put(result_file_path) + + mock_export_fn.assert_called_once() + mock_os_replace.assert_called_once() + + task = self._setup_task_with_annotations(format_name=format_name) + + with ExitStack() as es: + result_queue = multiprocessing.Queue() + number_of_processes = 2 + export_process_1 = es.enter_context( + process_closing( + multiprocessing.Process( + target=_export_1, + kwargs=dict( + task_id=task["id"], + result_queue=result_queue, + ), + ) + ) + ) + export_process_2 = es.enter_context( + process_closing( + multiprocessing.Process( + target=_export_2, + kwargs=dict( + task_id=task["id"], + result_queue=result_queue, + ), + ) + ) + ) + + export_process_1.start() + wait_condition(export_1_checked_file) + + export_process_2.start() + export_process_2.join(timeout=20) + export_process_1.join(timeout=20) + + self.assertFalse(export_process_1.is_alive()) + self.assertFalse(export_process_2.is_alive()) + + self.assertEqual(export_process_1.exitcode, 0) + self.assertEqual(export_process_2.exitcode, 0) + paths = {result_queue.get() for _ in range(number_of_processes)} + result_queue.close() + + self.assertTrue(len(paths) == 1) + self.assertNotEqual(paths, {None}) + self.assertTrue(osp.isfile(list(paths)[0])) + + for cond in ( + export_1_checked_file, export_1_made_export, export_1_replaced_file, + export_2_checked_file, export_2_made_export, export_2_replaced_file + ): + self.assertTrue(cond.get()) + def test_cleanup_can_remove_file(self): format_name = "CVAT for images 1.1" - images = self._generate_task_images(3) - task = self._create_task(tasks["main"], images) - self._create_annotations(task, f'{format_name} many jobs', "default") + task = self._setup_task_with_annotations(format_name=format_name) task_id = task["id"] with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), ): mock_rq_get_current_job.return_value = MagicMock(timeout=5) export_path = export(dst_format=format_name, task_id=task_id) with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), - patch('cvat.apps.dataset_manager.views.TTL_CONSTS', new={'task': timedelta(seconds=0)}), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as 
mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), + patch("cvat.apps.dataset_manager.views.TTL_CONSTS", new={"task": timedelta(seconds=0)}), ): mock_rq_get_current_job.return_value = MagicMock(timeout=5) @@ -1925,15 +2094,14 @@ def test_cleanup_can_remove_file(self): def test_cleanup_can_request_retry_on_locking_failure(self): format_name = "CVAT for images 1.1" - images = self._generate_task_images(3) - task = self._create_task(tasks["main"], images) - self._create_annotations(task, f'{format_name} many jobs', "default") + task = self._setup_task_with_annotations(format_name=format_name) task_id = task["id"] from cvat.apps.dataset_manager.util import LockNotAvailableError + with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), ): mock_rq_get_current_job.return_value = MagicMock(timeout=5) @@ -1941,11 +2109,11 @@ def test_cleanup_can_request_retry_on_locking_failure(self): with ( patch( - 'cvat.apps.dataset_manager.views.get_export_cache_lock', - side_effect=LockNotAvailableError + "cvat.apps.dataset_manager.views.get_export_cache_lock", + side_effect=LockNotAvailableError, ) as mock_get_export_cache_lock, - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), self.assertRaises(LockNotAvailableError), ): mock_rq_job = MagicMock(timeout=5) @@ -1960,8 +2128,8 @@ def test_cleanup_can_request_retry_on_locking_failure(self): def test_cleanup_can_fail_if_no_file(self): with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), self.assertRaises(FileNotFoundError), ): mock_rq_job = MagicMock(timeout=5) @@ -1971,23 +2139,22 @@ def test_cleanup_can_fail_if_no_file(self): def test_cleanup_can_defer_removal_if_file_is_used_recently(self): format_name = "CVAT for images 1.1" - images = self._generate_task_images(3) - task = self._create_task(tasks["main"], images) - self._create_annotations(task, f'{format_name} many jobs', "default") + task = self._setup_task_with_annotations(format_name=format_name) task_id = task["id"] with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), ): mock_rq_get_current_job.return_value = MagicMock(timeout=5) export_path = export(dst_format=format_name, task_id=task_id) from cvat.apps.dataset_manager.views import FileIsBeingUsedError + with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.TTL_CONSTS', new={'task': timedelta(hours=1)}), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as 
mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.TTL_CONSTS", new={"task": timedelta(hours=1)}), self.assertRaises(FileIsBeingUsedError), ): mock_rq_job = MagicMock(timeout=5) @@ -2006,14 +2173,12 @@ def test_cleanup_can_be_called_with_old_signature_and_values(self): # Jobs referring to the old API can exist in the redis queues after the server is updated format_name = "CVAT for images 1.1" - images = self._generate_task_images(3) - task = self._create_task(tasks["main"], images) - self._create_annotations(task, f'{format_name} many jobs', "default") + task = self._setup_task_with_annotations(format_name=format_name) task_id = task["id"] with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.django_rq.get_scheduler'), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.django_rq.get_scheduler"), ): mock_rq_get_current_job.return_value = MagicMock(timeout=5) @@ -2027,14 +2192,14 @@ def test_cleanup_can_be_called_with_old_signature_and_values(self): shutil.move(new_export_path, old_export_path) old_kwargs = { - 'file_path': old_export_path, - 'file_ctime': file_ctime, - 'logger': MagicMock(), + "file_path": old_export_path, + "file_ctime": file_ctime, + "logger": MagicMock(), } with ( - patch('cvat.apps.dataset_manager.views.rq.get_current_job') as mock_rq_get_current_job, - patch('cvat.apps.dataset_manager.views.TTL_CONSTS', new={'task': timedelta(seconds=0)}), + patch("cvat.apps.dataset_manager.views.rq.get_current_job") as mock_rq_get_current_job, + patch("cvat.apps.dataset_manager.views.TTL_CONSTS", new={"task": timedelta(seconds=0)}), ): mock_rq_get_current_job.return_value = MagicMock(timeout=5) diff --git a/cvat/apps/dataset_manager/tests/utils.py b/cvat/apps/dataset_manager/tests/utils.py new file mode 100644 index 000000000000..6e3b51a878d9 --- /dev/null +++ b/cvat/apps/dataset_manager/tests/utils.py @@ -0,0 +1,78 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import os +import tempfile +import unittest +from types import TracebackType +from typing import Optional + +from datumaro.util.os_util import rmfile, rmtree + +from cvat.apps.dataset_manager.util import current_function_name + + +class FileRemover: + def __init__(self, path: str, is_dir: bool = False): + self.path = path + self.is_dir = is_dir + + def __enter__(self) -> str: + return self.path + + def __exit__( + self, + exc_type: Optional[type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + if self.is_dir: + try: + rmtree(self.path) + except unittest.SkipTest: + # Suppress skip test errors from git.util.rmtree + if not exc_type: + raise + else: + rmfile(self.path) + + +class TestDir(FileRemover): + """ + Creates a temporary directory for a test. Uses the name of + the test function to name the directory. + + Usage: + + .. code-block:: + + with TestDir() as test_dir: + ... + """ + + def __init__(self, path: Optional[str] = None, frame_id: int = 2): + if not path: + prefix = f"temp_{current_function_name(frame_id)}-" + else: + prefix = None + self._prefix = prefix + + super().__init__(path, is_dir=True) + + def __enter__(self) -> str: + """ + Creates a test directory. 
+ + Returns: path to the directory + """ + + path = self.path + + if path is None: + path = tempfile.mkdtemp(dir=os.getcwd(), prefix=self._prefix) + self.path = path + else: + os.makedirs(path, exist_ok=False) + + return path diff --git a/cvat/apps/dataset_manager/util.py b/cvat/apps/dataset_manager/util.py index 0193748446f3..6d814ec2c679 100644 --- a/cvat/apps/dataset_manager/util.py +++ b/cvat/apps/dataset_manager/util.py @@ -8,11 +8,12 @@ import os.path as osp import re import zipfile +from collections.abc import Generator, Sequence from contextlib import contextmanager from copy import deepcopy from datetime import timedelta from threading import Lock -from typing import Any, Generator, Optional, Sequence +from typing import Any, Optional import attrs import django_rq @@ -110,14 +111,16 @@ def get_export_cache_lock( *, ttl: int | timedelta, block: bool = True, - acquire_timeout: Optional[int | timedelta] = None, + acquire_timeout: int | timedelta, ) -> Generator[Lock, Any, Any]: + assert acquire_timeout is not None, "Endless waiting for the lock should be avoided" + if isinstance(acquire_timeout, timedelta): acquire_timeout = acquire_timeout.total_seconds() - if acquire_timeout is not None and acquire_timeout < 0: + + if acquire_timeout < 0: raise ValueError("acquire_timeout must be a non-negative number") - elif acquire_timeout is None: - acquire_timeout = -1 + if isinstance(ttl, timedelta): ttl = ttl.total_seconds() @@ -232,3 +235,9 @@ def parse_export_file_path(file_path: os.PathLike[str]) -> ParsedExportFilename: format_repr=basename_match.group('format_tag'), file_ext=basename_match.group('file_ext'), ) + +def extend_export_file_lifetime(file_path: str): + # Update the last modification time to extend the export's lifetime, + # as the last access time is not available on every filesystem. + # As a result, file deletion by the cleaning job will be postponed. + os.utime(file_path, None) diff --git a/cvat/apps/dataset_manager/views.py b/cvat/apps/dataset_manager/views.py index 35e40c8c03a3..52bc9cd15f7a 100644 --- a/cvat/apps/dataset_manager/views.py +++ b/cvat/apps/dataset_manager/views.py @@ -11,6 +11,7 @@ import django_rq import rq +from os.path import exists as osp_exists from django.conf import settings from django.utils import timezone from rq_scheduler import Scheduler @@ -20,27 +21,30 @@ from cvat.apps.engine.log import ServerLogManager from cvat.apps.engine.models import Job, Project, Task from cvat.apps.engine.utils import get_rq_lock_by_user +from cvat.apps.engine.rq_job_handler import RQMeta from .formats.registry import EXPORT_FORMATS, IMPORT_FORMATS from .util import ( LockNotAvailableError, current_function_name, get_export_cache_lock, get_export_cache_dir, make_export_filename, - parse_export_file_path + parse_export_file_path, extend_export_file_lifetime ) from .util import EXPORT_CACHE_DIR_NAME # pylint: disable=unused-import + slogger = ServerLogManager(__name__) _MODULE_NAME = __package__ + '.' 
+ osp.splitext(osp.basename(__file__))[0] -def log_exception(logger=None, exc_info=True): + +def log_exception(logger: logging.Logger | None = None, exc_info: bool = True): if logger is None: - logger = slogger + logger = slogger.glob logger.exception("[%s @ %s]: exception occurred" % \ (_MODULE_NAME, current_function_name(2)), exc_info=exc_info) -DEFAULT_CACHE_TTL = timedelta(seconds=settings.DATASET_CACHE_TTL) +DEFAULT_CACHE_TTL = timedelta(seconds=settings.EXPORT_CACHE_TTL) PROJECT_CACHE_TTL = DEFAULT_CACHE_TTL TASK_CACHE_TTL = DEFAULT_CACHE_TTL JOB_CACHE_TTL = DEFAULT_CACHE_TTL @@ -50,8 +54,9 @@ def log_exception(logger=None, exc_info=True): 'job': JOB_CACHE_TTL, } -EXPORT_CACHE_LOCK_TIMEOUT = timedelta(seconds=settings.DATASET_CACHE_LOCK_TIMEOUT) -EXPORT_LOCKED_RETRY_INTERVAL = timedelta(seconds=settings.DATASET_EXPORT_LOCKED_RETRY_INTERVAL) +EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT = timedelta(seconds=settings.EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT) +EXPORT_CACHE_LOCK_TTL = timedelta(seconds=settings.EXPORT_CACHE_LOCK_TTL) +EXPORT_LOCKED_RETRY_INTERVAL = timedelta(seconds=settings.EXPORT_LOCKED_RETRY_INTERVAL) def get_export_cache_ttl(db_instance: str | Project | Task | Job) -> timedelta: @@ -60,6 +65,14 @@ def get_export_cache_ttl(db_instance: str | Project | Task | Job) -> timedelta: return TTL_CONSTS[db_instance.lower()] +def _patch_scheduled_job_status(job: rq.job.Job): + # NOTE: rq scheduler < 0.14 does not set the appropriate + # job status (SCHEDULED). This has been fixed in the 0.14 version. + # https://github.com/rq/rq-scheduler/blob/f7d5787c5f94b5517e209c612ef648f4bfc44f9e/rq_scheduler/scheduler.py#L148 + # FUTURE-TODO: delete manual status setting after upgrading to 0.14 + if job.get_status(refresh=False) != rq.job.JobStatus.SCHEDULED: + job.set_status(rq.job.JobStatus.SCHEDULED) + def _retry_current_rq_job(time_delta: timedelta) -> rq.job.Job: # TODO: implement using retries once we move from rq_scheduler to builtin RQ scheduler # for better reliability and error reporting @@ -78,26 +91,34 @@ def _patched_retry(*_1, **_2): user_id = current_rq_job.meta.get('user', {}).get('id') or -1 with get_rq_lock_by_user(settings.CVAT_QUEUES.EXPORT_DATA.value, user_id): - scheduler.enqueue_in( + scheduled_rq_job: rq.job.Job = scheduler.enqueue_in( time_delta, current_rq_job.func, *current_rq_job.args, **current_rq_job.kwargs, job_id=current_rq_job.id, - meta=current_rq_job.meta, - depends_on=current_rq_job.dependency_ids, + meta=RQMeta.reset_meta_on_retry(current_rq_job.meta), job_ttl=current_rq_job.ttl, job_result_ttl=current_rq_job.result_ttl, job_description=current_rq_job.description, on_success=current_rq_job.success_callback, on_failure=current_rq_job.failure_callback, ) + _patch_scheduled_job_status(scheduled_rq_job) current_rq_job.retries_left = 1 setattr(current_rq_job, 'retry', _patched_retry) return current_rq_job -def export(dst_format, project_id=None, task_id=None, job_id=None, server_url=None, save_images=False): +def export( + *, + dst_format: str, + project_id: int | None = None, + task_id: int | None = None, + job_id: int | None = None, + server_url: str | None = None, + save_images: bool = False, +): try: if task_id is not None: logger = slogger.task[task_id] @@ -134,41 +155,50 @@ def export(dst_format, project_id=None, task_id=None, job_id=None, server_url=No os.makedirs(cache_dir, exist_ok=True) + # acquire a lock 2 times instead of using one long lock: + # 1. to check whether the file exists or not + # 2. 
to create a file when it doesn't exist with get_export_cache_lock( output_path, - block=True, - acquire_timeout=EXPORT_CACHE_LOCK_TIMEOUT, - ttl=rq.get_current_job().timeout, + ttl=EXPORT_CACHE_LOCK_TTL, + acquire_timeout=EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT, ): - if not osp.exists(output_path): - with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir: - temp_file = osp.join(temp_dir, 'result') - export_fn(db_instance.id, temp_file, dst_format, - server_url=server_url, save_images=save_images) - os.replace(temp_file, output_path) - - scheduler: Scheduler = django_rq.get_scheduler( - settings.CVAT_QUEUES.EXPORT_DATA.value - ) - cleaning_job = scheduler.enqueue_in( - time_delta=cache_ttl, - func=clear_export_cache, - file_path=output_path, - file_ctime=instance_update_time.timestamp(), - logger=logger - ) - logger.info( - "The {} '{}' is exported as '{}' at '{}' " - "and available for downloading for the next {}. " - "Export cache cleaning job is enqueued, id '{}'".format( - db_instance.__class__.__name__.lower(), - db_instance.name if isinstance( - db_instance, (Project, Task) - ) else db_instance.id, - dst_format, output_path, cache_ttl, - cleaning_job.id - ) - ) + if osp_exists(output_path): + extend_export_file_lifetime(output_path) + return output_path + + with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir: + temp_file = osp.join(temp_dir, 'result') + export_fn(db_instance.id, temp_file, dst_format, + server_url=server_url, save_images=save_images) + with get_export_cache_lock( + output_path, + ttl=EXPORT_CACHE_LOCK_TTL, + acquire_timeout=EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT, + ): + os.replace(temp_file, output_path) + + scheduler: Scheduler = django_rq.get_scheduler(settings.CVAT_QUEUES.EXPORT_DATA.value) + cleaning_job = scheduler.enqueue_in( + time_delta=cache_ttl, + func=clear_export_cache, + file_path=output_path, + file_ctime=instance_update_time.timestamp(), + logger=logger, + ) + _patch_scheduled_job_status(cleaning_job) + logger.info( + "The {} '{}' is exported as '{}' at '{}' " + "and available for downloading for the next {}. 
" + "Export cache cleaning job is enqueued, id '{}'".format( + db_instance.__class__.__name__.lower(), + db_instance.id, + dst_format, + output_path, + cache_ttl, + cleaning_job.id, + ) + ) return output_path except LockNotAvailableError: @@ -184,23 +214,23 @@ def export(dst_format, project_id=None, task_id=None, job_id=None, server_url=No log_exception(logger) raise -def export_job_annotations(job_id, dst_format=None, server_url=None): - return export(dst_format,job_id=job_id, server_url=server_url, save_images=False) +def export_job_annotations(job_id: int, dst_format: str, *, server_url: str | None = None): + return export(dst_format=dst_format, job_id=job_id, server_url=server_url, save_images=False) -def export_job_as_dataset(job_id, dst_format=None, server_url=None): - return export(dst_format, job_id=job_id, server_url=server_url, save_images=True) +def export_job_as_dataset(job_id: int, dst_format: str, *, server_url: str | None = None): + return export(dst_format=dst_format, job_id=job_id, server_url=server_url, save_images=True) -def export_task_as_dataset(task_id, dst_format=None, server_url=None): - return export(dst_format, task_id=task_id, server_url=server_url, save_images=True) +def export_task_as_dataset(task_id: int, dst_format: str, *, server_url: str | None = None): + return export(dst_format=dst_format, task_id=task_id, server_url=server_url, save_images=True) -def export_task_annotations(task_id, dst_format=None, server_url=None): - return export(dst_format,task_id=task_id, server_url=server_url, save_images=False) +def export_task_annotations(task_id: int, dst_format: str, *, server_url: str | None = None): + return export(dst_format=dst_format, task_id=task_id, server_url=server_url, save_images=False) -def export_project_as_dataset(project_id, dst_format=None, server_url=None): - return export(dst_format, project_id=project_id, server_url=server_url, save_images=True) +def export_project_as_dataset(project_id: int, dst_format: str, *, server_url: str | None = None): + return export(dst_format=dst_format, project_id=project_id, server_url=server_url, save_images=True) -def export_project_annotations(project_id, dst_format=None, server_url=None): - return export(dst_format, project_id=project_id, server_url=server_url, save_images=False) +def export_project_annotations(project_id: int, dst_format: str, *, server_url: str | None = None): + return export(dst_format=dst_format, project_id=project_id, server_url=server_url, save_images=False) class FileIsBeingUsedError(Exception): @@ -213,8 +243,8 @@ def clear_export_cache(file_path: str, file_ctime: float, logger: logging.Logger with get_export_cache_lock( file_path, block=True, - acquire_timeout=EXPORT_CACHE_LOCK_TIMEOUT, - ttl=rq.get_current_job().timeout, + acquire_timeout=EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT, + ttl=EXPORT_CACHE_LOCK_TTL, ): if not osp.exists(file_path): raise FileNotFoundError("Export cache file '{}' doesn't exist".format(file_path)) diff --git a/cvat/apps/engine/background.py b/cvat/apps/engine/background.py index 441d4702014d..a3a2d34326b9 100644 --- a/cvat/apps/engine/background.py +++ b/cvat/apps/engine/background.py @@ -7,14 +7,14 @@ from abc import ABC, abstractmethod from dataclasses import dataclass from datetime import datetime -from typing import Any, Callable, Dict, Optional, Union +from typing import Any, Callable, Optional, Union import django_rq from attrs.converters import to_bool from django.conf import settings from django.http.response import HttpResponseBadRequest from 
django.utils import timezone -from django_rq.queues import DjangoRQ +from django_rq.queues import DjangoRQ, DjangoScheduler from rest_framework import serializers, status from rest_framework.request import Request from rest_framework.response import Response @@ -52,6 +52,11 @@ slogger = ServerLogManager(__name__) +REQUEST_TIMEOUT = 60 +# it's better to return LockNotAvailableError instead of response with 504 status +LOCK_TTL = REQUEST_TIMEOUT - 5 +LOCK_ACQUIRE_TIMEOUT = LOCK_TTL - 5 + class _ResourceExportManager(ABC): QUEUE_NAME = settings.CVAT_QUEUES.EXPORT_DATA.value @@ -89,7 +94,7 @@ def setup_background_job(self, queue: DjangoRQ, rq_id: str) -> None: def _handle_rq_job_v1(self, rq_job: Optional[RQJob], queue: DjangoRQ) -> Optional[Response]: pass - def _handle_rq_job_v2(self, rq_job: Optional[RQJob], *args, **kwargs) -> Optional[Response]: + def _handle_rq_job_v2(self, rq_job: Optional[RQJob], queue: DjangoRQ) -> Optional[Response]: if not rq_job: return None @@ -101,17 +106,23 @@ def _handle_rq_job_v2(self, rq_job: Optional[RQJob], *args, **kwargs) -> Optiona status=status.HTTP_409_CONFLICT, ) - if rq_job_status in (RQJobStatus.SCHEDULED, RQJobStatus.DEFERRED): + if rq_job_status == RQJobStatus.DEFERRED: + rq_job.cancel(enqueue_dependents=settings.ONE_RUNNING_JOB_IN_QUEUE_PER_USER) + + if rq_job_status == RQJobStatus.SCHEDULED: + scheduler: DjangoScheduler = django_rq.get_scheduler(queue.name, queue=queue) + # remove the job id from the set with scheduled keys + scheduler.cancel(rq_job) rq_job.cancel(enqueue_dependents=settings.ONE_RUNNING_JOB_IN_QUEUE_PER_USER) rq_job.delete() return None - def handle_rq_job(self, *args, **kwargs) -> Optional[Response]: + def handle_rq_job(self, rq_job: RQJob | None, queue: DjangoRQ) -> Optional[Response]: if self.version == 1: - return self._handle_rq_job_v1(*args, **kwargs) + return self._handle_rq_job_v1(rq_job, queue) elif self.version == 2: - return self._handle_rq_job_v2(*args, **kwargs) + return self._handle_rq_job_v2(rq_job, queue) raise ValueError("Unsupported version") @@ -159,7 +170,7 @@ class ExportArgs: format: str filename: str save_images: bool - location_config: Dict[str, Any] + location_config: dict[str, Any] @property def location(self) -> Location: @@ -220,7 +231,9 @@ def is_result_outdated() -> bool: return rq_job.meta[RQJobMetaField.REQUEST]["timestamp"] < instance_update_time def handle_local_download() -> Response: - with dm.util.get_export_cache_lock(file_path, ttl=REQUEST_TIMEOUT): + with dm.util.get_export_cache_lock( + file_path, ttl=LOCK_TTL, acquire_timeout=LOCK_ACQUIRE_TIMEOUT + ): if not osp.exists(file_path): return Response( "The exported file has expired, please retry exporting", @@ -295,8 +308,6 @@ def handle_local_download() -> Response: instance_update_time = self.get_instance_update_time() instance_timestamp = self.get_timestamp(instance_update_time) - REQUEST_TIMEOUT = 60 - if rq_job_status == RQJobStatus.FINISHED: if self.export_args.location == Location.CLOUD_STORAGE: rq_job.delete() @@ -313,11 +324,13 @@ def handle_local_download() -> Response: if action == "download": return handle_local_download() else: - with dm.util.get_export_cache_lock(file_path, ttl=REQUEST_TIMEOUT): + with dm.util.get_export_cache_lock( + file_path, + ttl=LOCK_TTL, + acquire_timeout=LOCK_ACQUIRE_TIMEOUT, + ): if osp.exists(file_path) and not is_result_outdated(): - # Update last update time to prolong the export lifetime - # as the last access time is not available on every filesystem - os.utime(file_path, None) + 
dm.util.extend_export_file_lifetime(file_path) return Response(status=status.HTTP_201_CREATED) @@ -422,7 +435,7 @@ def setup_background_job( user_id = self.request.user.id func = self.export_callback - func_args = (self.db_instance.id, self.export_args.format, server_address) + func_args = (self.db_instance.id, self.export_args.format) result_url = None if self.export_args.location == Location.CLOUD_STORAGE: @@ -467,6 +480,9 @@ def setup_background_job( queue.enqueue_call( func=func, args=func_args, + kwargs={ + "server_url": server_address, + }, job_id=rq_id, meta=get_rq_job_meta( request=self.request, db_obj=self.db_instance, result_url=result_url @@ -499,7 +515,7 @@ class BackupExportManager(_ResourceExportManager): @dataclass class ExportArgs: filename: str - location_config: Dict[str, Any] + location_config: dict[str, Any] @property def location(self) -> Location: diff --git a/cvat/apps/engine/backup.py b/cvat/apps/engine/backup.py index 499700a3b4ef..3c8ba5678c24 100644 --- a/cvat/apps/engine/backup.py +++ b/cvat/apps/engine/backup.py @@ -10,10 +10,11 @@ import shutil import tempfile import uuid +from collections.abc import Collection, Iterable from enum import Enum from logging import Logger from tempfile import NamedTemporaryFile -from typing import Any, Collection, Dict, Iterable, Optional, Union +from typing import Any, Optional, Union from zipfile import ZipFile import django_rq @@ -650,7 +651,7 @@ def _calculate_segment_size(jobs): return segment_size, overlap @staticmethod - def _parse_segment_frames(*, jobs: Dict[str, Any]) -> JobFileMapping: + def _parse_segment_frames(*, jobs: dict[str, Any]) -> JobFileMapping: segments = [] for i, segment in enumerate(jobs): diff --git a/cvat/apps/engine/cache.py b/cvat/apps/engine/cache.py index b2a3437ddf3f..43c2be7bc57e 100644 --- a/cvat/apps/engine/cache.py +++ b/cvat/apps/engine/cache.py @@ -10,31 +10,29 @@ import os.path import pickle # nosec import tempfile +import time import zipfile import zlib +from collections.abc import Collection, Generator, Iterator, Sequence from contextlib import ExitStack, closing from datetime import datetime, timezone from itertools import groupby, pairwise -from typing import ( - Any, - Callable, - Collection, - Generator, - Iterator, - Optional, - Sequence, - Tuple, - Type, - Union, - overload, -) +from typing import Any, Callable, Optional, Union, overload +import attrs import av import cv2 +import django_rq import PIL.Image import PIL.ImageOps +import rq +from django.conf import settings from django.core.cache import caches +from django.db import models as django_models +from django.utils import timezone as django_tz +from redis.exceptions import LockError from rest_framework.exceptions import NotFound, ValidationError +from rq.job import JobStatus as RQJobStatus from cvat.apps.engine import models from cvat.apps.engine.cloud_provider import ( @@ -53,75 +51,267 @@ VideoReaderWithManifest, ZipChunkWriter, ZipCompressedChunkWriter, + load_image, +) +from cvat.apps.engine.rq_job_handler import RQJobMetaField +from cvat.apps.engine.utils import ( + CvatChunkTimestampMismatchError, + format_list, + get_rq_lock_for_job, + md5_hash, ) -from cvat.apps.engine.utils import md5_hash, preload_images from utils.dataset_manifest import ImageManifestManager slogger = ServerLogManager(__name__) -DataWithMime = Tuple[io.BytesIO, str] -_CacheItem = Tuple[io.BytesIO, str, int] +DataWithMime = tuple[io.BytesIO, str] +_CacheItem = tuple[io.BytesIO, str, int, Union[datetime, None]] + + +def enqueue_create_chunk_job( + 
queue: rq.Queue, + rq_job_id: str, + create_callback: Callback, + *, + rq_job_result_ttl: int = 60, + rq_job_failure_ttl: int = 3600 * 24 * 14, # 2 weeks +) -> rq.job.Job: + try: + with get_rq_lock_for_job(queue, rq_job_id): + rq_job = queue.fetch_job(rq_job_id) + + if not rq_job or ( + # Enqueue the job if the chunk was deleted but the RQ job still exists. + # This can happen in cases involving jobs with honeypots and + # if the job wasn't collected by the requesting process for any reason. + rq_job.get_status(refresh=False) + in {RQJobStatus.FINISHED, RQJobStatus.FAILED, RQJobStatus.CANCELED} + ): + rq_job = queue.enqueue( + create_callback, + job_id=rq_job_id, + result_ttl=rq_job_result_ttl, + failure_ttl=rq_job_failure_ttl, + ) + except LockError: + raise TimeoutError(f"Cannot acquire lock for {rq_job_id}") + + return rq_job + + +def wait_for_rq_job(rq_job: rq.job.Job): + retries = settings.CVAT_CHUNK_CREATE_TIMEOUT // settings.CVAT_CHUNK_CREATE_CHECK_INTERVAL or 1 + while retries > 0: + job_status = rq_job.get_status() + if job_status in ("finished",): + return + elif job_status in ("failed",): + job_meta = rq_job.get_meta() + exc_type = job_meta.get(RQJobMetaField.EXCEPTION_TYPE, Exception) + exc_args = job_meta.get(RQJobMetaField.EXCEPTION_ARGS, ("Cannot create chunk",)) + raise exc_type(*exc_args) + + time.sleep(settings.CVAT_CHUNK_CREATE_CHECK_INTERVAL) + retries -= 1 + + raise TimeoutError(f"Chunk processing takes too long {rq_job.id}") + + +def _is_run_inside_rq() -> bool: + return rq.get_current_job() is not None + + +def _convert_args_for_callback(func_args: list[Any]) -> list[Any]: + result = [] + for func_arg in func_args: + if _is_run_inside_rq(): + result.append(func_arg) + else: + if isinstance( + func_arg, + django_models.Model, + ): + result.append(func_arg.id) + elif isinstance(func_arg, list): + result.append(_convert_args_for_callback(func_arg)) + else: + result.append(func_arg) + + return result + + +@attrs.frozen +class Callback: + _callable: Callable[..., DataWithMime] = attrs.field( + validator=attrs.validators.is_callable(), + ) + _args: list[Any] = attrs.field( + factory=list, + validator=attrs.validators.instance_of(list), + converter=_convert_args_for_callback, + ) + _kwargs: dict[str, Union[bool, int, float, str, None]] = attrs.field( + factory=dict, + validator=attrs.validators.deep_mapping( + key_validator=attrs.validators.instance_of(str), + value_validator=attrs.validators.instance_of((bool, int, float, str, type(None))), + mapping_validator=attrs.validators.instance_of(dict), + ), + ) + + def __call__(self) -> DataWithMime: + return self._callable(*self._args, **self._kwargs) class MediaCache: - def __init__(self) -> None: - self._cache = caches["media"] + _QUEUE_NAME = settings.CVAT_QUEUES.CHUNKS.value + _QUEUE_JOB_PREFIX_TASK = "chunks:prepare-item-" + _CACHE_NAME = "media" + _PREVIEW_TTL = settings.CVAT_PREVIEW_CACHE_TTL + + @staticmethod + def _cache(): + return caches[MediaCache._CACHE_NAME] - def _get_checksum(self, value: bytes) -> int: + @staticmethod + def _get_checksum(value: bytes) -> int: return zlib.crc32(value) def _get_or_set_cache_item( - self, key: str, create_callback: Callable[[], DataWithMime] + self, + key: str, + create_callback: Callback, + *, + cache_item_ttl: Optional[int] = None, ) -> _CacheItem: - def create_item() -> _CacheItem: - slogger.glob.info(f"Starting to prepare chunk: key {key}") - item_data = create_callback() - slogger.glob.info(f"Ending to prepare chunk: key {key}") + item = self._get_cache_item(key) + if item: + 
return item - item_data_bytes = item_data[0].getvalue() - item = (item_data[0], item_data[1], self._get_checksum(item_data_bytes)) - if item_data_bytes: - self._cache.set(key, item) + return self._create_cache_item( + key, + create_callback, + cache_item_ttl=cache_item_ttl, + ) - return item + @classmethod + def _get_queue(cls) -> rq.Queue: + return django_rq.get_queue(cls._QUEUE_NAME) - item = self._get_cache_item(key) - if not item: - item = create_item() + @classmethod + def _make_queue_job_id(cls, key: str) -> str: + return f"{cls._QUEUE_JOB_PREFIX_TASK}{key}" + + @staticmethod + def _drop_return_value(func: Callable[..., DataWithMime], *args: Any, **kwargs: Any): + func(*args, **kwargs) + + @classmethod + def _create_and_set_cache_item( + cls, + key: str, + create_callback: Callback, + cache_item_ttl: Optional[int] = None, + ) -> DataWithMime: + timestamp = django_tz.now() + item_data = create_callback() + item_data_bytes = item_data[0].getvalue() + item = (item_data[0], item_data[1], cls._get_checksum(item_data_bytes), timestamp) + if item_data_bytes: + cache = cls._cache() + with get_rq_lock_for_job( + cls._get_queue(), + key, + ): + cached_item = cache.get(key) + if cached_item is not None and timestamp <= cached_item[3]: + item = cached_item + else: + cache.set(key, item, timeout=cache_item_ttl or cache.default_timeout) + + return item + + def _create_cache_item( + self, + key: str, + create_callback: Callback, + *, + cache_item_ttl: Optional[int] = None, + ) -> _CacheItem: + slogger.glob.info(f"Starting to prepare chunk: key {key}") + if _is_run_inside_rq(): + item = self._create_and_set_cache_item( + key, + create_callback, + cache_item_ttl=cache_item_ttl, + ) else: - # compare checksum - item_data = item[0].getbuffer() if isinstance(item[0], io.BytesIO) else item[0] - item_checksum = item[2] if len(item) == 3 else None - if item_checksum != self._get_checksum(item_data): - slogger.glob.info(f"Recreating cache item {key} due to checksum mismatch") - item = create_item() + rq_job = enqueue_create_chunk_job( + queue=self._get_queue(), + rq_job_id=self._make_queue_job_id(key), + create_callback=Callback( + callable=self._drop_return_value, + args=[ + self._create_and_set_cache_item, + key, + create_callback, + ], + kwargs={ + "cache_item_ttl": cache_item_ttl, + }, + ), + ) + wait_for_rq_job(rq_job) + item = self._get_cache_item(key) + + slogger.glob.info(f"Ending to prepare chunk: key {key}") return item def _delete_cache_item(self, key: str): - try: - self._cache.delete(key) - slogger.glob.info(f"Removed chunk from the cache: key {key}") - except pickle.UnpicklingError: - slogger.glob.error(f"Failed to remove item from the cache: key {key}", exc_info=True) + self._cache().delete(key) + slogger.glob.info(f"Removed the cache key {key}") + + def _bulk_delete_cache_items(self, keys: Sequence[str]): + self._cache().delete_many(keys) + slogger.glob.info(f"Removed the cache keys {format_list(keys)}") def _get_cache_item(self, key: str) -> Optional[_CacheItem]: - slogger.glob.info(f"Starting to get chunk from cache: key {key}") try: - item = self._cache.get(key) + item = self._cache().get(key) except pickle.UnpicklingError: slogger.glob.error(f"Unable to get item from cache: key {key}", exc_info=True) item = None - slogger.glob.info(f"Ending to get chunk from cache: key {key}, is_cached {bool(item)}") + + if not item: + return None + + item_data = item[0].getbuffer() if isinstance(item[0], io.BytesIO) else item[0] + item_checksum = item[2] if len(item) == 4 else None + if item_checksum != 
self._get_checksum(item_data):
+            slogger.glob.info(f"Cache item {key} checksum mismatch")
+            return None

         return item

-    def _has_key(self, key: str) -> bool:
-        return self._cache.has_key(key)
+    def _validate_cache_item_timestamp(
+        self, item: _CacheItem, expected_timestamp: datetime
+    ) -> _CacheItem:
+        if item[3] < expected_timestamp:
+            raise CvatChunkTimestampMismatchError(
+                f"Cache timestamp mismatch. Item_ts: {item[3]}, expected_ts: {expected_timestamp}"
+            )
+        return item
+
+    @classmethod
+    def _has_key(cls, key: str) -> bool:
+        return cls._cache().has_key(key)
+
+    @staticmethod
     def _make_cache_key_prefix(
-        self, obj: Union[models.Task, models.Segment, models.Job, models.CloudStorage]
+        obj: Union[models.Task, models.Segment, models.Job, models.CloudStorage]
     ) -> str:
         if isinstance(obj, models.Task):
             return f"task_{obj.id}"
@@ -134,14 +324,15 @@ def _make_cache_key_prefix(
         else:
             assert False, f"Unexpected object type {type(obj)}"

+    @classmethod
     def _make_chunk_key(
-        self,
+        cls,
         db_obj: Union[models.Task, models.Segment, models.Job],
         chunk_number: int,
         *,
         quality: FrameQuality,
     ) -> str:
-        return f"{self._make_cache_key_prefix(db_obj)}_chunk_{chunk_number}_{quality}"
+        return f"{cls._make_cache_key_prefix(db_obj)}_chunk_{chunk_number}_{quality}"

     def _make_preview_key(self, db_obj: Union[models.Segment, models.CloudStorage]) -> str:
         return f"{self._make_cache_key_prefix(db_obj)}_preview"
@@ -155,8 +346,8 @@ def _make_segment_task_chunk_key(
     ) -> str:
         return f"{self._make_cache_key_prefix(db_obj)}_task_chunk_{chunk_number}_{quality}"

-    def _make_context_image_preview_key(self, db_data: models.Data, frame_number: int) -> str:
-        return f"context_image_{db_data.id}_{frame_number}_preview"
+    def _make_frame_context_images_chunk_key(self, db_data: models.Data, frame_number: int) -> str:
+        return f"context_images_{db_data.id}_{frame_number}"

     @overload
     def _to_data_with_mime(self, cache_item: _CacheItem) -> DataWithMime: ...
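For reference, the hunks above change the cache item layout from a 3-tuple to a 4-tuple that carries a creation timestamp next to the CRC32 checksum, and stale items are rejected against the segment's chunks_updated_date. A minimal, self-contained sketch of that validation flow, assuming simplified stand-in names (TimestampMismatchError, make_item, get_valid_item are illustrative only; the real logic lives in MediaCache in cvat/apps/engine/cache.py):

    import io
    import zlib
    from datetime import datetime, timedelta, timezone

    class TimestampMismatchError(Exception):  # stand-in for CvatChunkTimestampMismatchError
        pass

    def make_item(data: bytes, timestamp: datetime) -> tuple:
        # _CacheItem layout: (buffer, mime type, checksum, creation timestamp)
        return (io.BytesIO(data), "application/zip", zlib.crc32(data), timestamp)

    def get_valid_item(item: tuple, expected_timestamp: datetime) -> tuple:
        # checksum check: a corrupted payload invalidates the cache entry
        if zlib.crc32(item[0].getvalue()) != item[2]:
            raise ValueError("checksum mismatch, item must be recreated")
        # timestamp check: items created before the data was updated are stale
        if item[3] < expected_timestamp:
            raise TimestampMismatchError(f"item ts {item[3]} < expected {expected_timestamp}")
        return item

    now = datetime.now(timezone.utc)
    item = make_item(b"chunk-bytes", now)
    get_valid_item(item, now)                             # fresh item passes
    try:
        get_valid_item(item, now + timedelta(seconds=1))  # data updated later: stale
    except TimestampMismatchError:
        pass

The timestamp comparison is what lets callers such as get_or_set_segment_chunk() refresh chunks_updated_date from the database and raise instead of serving a chunk built from outdated media.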
@@ -173,35 +364,47 @@ def _to_data_with_mime(self, cache_item: Optional[_CacheItem]) -> Optional[DataW def get_or_set_segment_chunk( self, db_segment: models.Segment, chunk_number: int, *, quality: FrameQuality ) -> DataWithMime: + + item = self._get_or_set_cache_item( + self._make_chunk_key(db_segment, chunk_number, quality=quality), + Callback( + callable=self.prepare_segment_chunk, + args=[db_segment, chunk_number], + kwargs={"quality": quality}, + ), + ) + db_segment.refresh_from_db(fields=["chunks_updated_date"]) + return self._to_data_with_mime( - self._get_or_set_cache_item( - key=self._make_chunk_key(db_segment, chunk_number, quality=quality), - create_callback=lambda: self.prepare_segment_chunk( - db_segment, chunk_number, quality=quality - ), - ) + self._validate_cache_item_timestamp(item, db_segment.chunks_updated_date) ) def get_task_chunk( self, db_task: models.Task, chunk_number: int, *, quality: FrameQuality ) -> Optional[DataWithMime]: return self._to_data_with_mime( - self._get_cache_item(key=self._make_chunk_key(db_task, chunk_number, quality=quality)) + self._get_cache_item( + key=self._make_chunk_key(db_task, chunk_number, quality=quality), + ) ) def get_or_set_task_chunk( self, db_task: models.Task, chunk_number: int, + set_callback: Callback, *, quality: FrameQuality, - set_callback: Callable[[], DataWithMime], ) -> DataWithMime: + + item = self._get_or_set_cache_item( + self._make_chunk_key(db_task, chunk_number, quality=quality), + set_callback, + ) + db_task.refresh_from_db(fields=["segment_set"]) + return self._to_data_with_mime( - self._get_or_set_cache_item( - key=self._make_chunk_key(db_task, chunk_number, quality=quality), - create_callback=set_callback, - ) + self._validate_cache_item_timestamp(item, db_task.get_chunks_updated_date()) ) def get_segment_task_chunk( @@ -209,7 +412,7 @@ def get_segment_task_chunk( ) -> Optional[DataWithMime]: return self._to_data_with_mime( self._get_cache_item( - key=self._make_segment_task_chunk_key(db_segment, chunk_number, quality=quality) + key=self._make_segment_task_chunk_key(db_segment, chunk_number, quality=quality), ) ) @@ -219,13 +422,17 @@ def get_or_set_segment_task_chunk( chunk_number: int, *, quality: FrameQuality, - set_callback: Callable[[], DataWithMime], + set_callback: Callback, ) -> DataWithMime: + + item = self._get_or_set_cache_item( + self._make_segment_task_chunk_key(db_segment, chunk_number, quality=quality), + set_callback, + ) + db_segment.refresh_from_db(fields=["chunks_updated_date"]) + return self._to_data_with_mime( - self._get_or_set_cache_item( - key=self._make_segment_task_chunk_key(db_segment, chunk_number, quality=quality), - create_callback=set_callback, - ) + self._validate_cache_item_timestamp(item, db_segment.chunks_updated_date), ) def get_or_set_selective_job_chunk( @@ -233,9 +440,13 @@ def get_or_set_selective_job_chunk( ) -> DataWithMime: return self._to_data_with_mime( self._get_or_set_cache_item( - key=self._make_chunk_key(db_job, chunk_number, quality=quality), - create_callback=lambda: self.prepare_masked_range_segment_chunk( - db_job.segment, chunk_number, quality=quality + self._make_chunk_key(db_job, chunk_number, quality=quality), + Callback( + callable=self.prepare_masked_range_segment_chunk, + args=[db_job.segment, chunk_number], + kwargs={ + "quality": quality, + }, ), ) ) @@ -244,7 +455,11 @@ def get_or_set_segment_preview(self, db_segment: models.Segment) -> DataWithMime return self._to_data_with_mime( self._get_or_set_cache_item( self._make_preview_key(db_segment), - 
create_callback=lambda: self._prepare_segment_preview(db_segment), + Callback( + callable=self._prepare_segment_preview, + args=[db_segment], + ), + cache_item_ttl=self._PREVIEW_TTL, ) ) @@ -255,6 +470,45 @@ def remove_segment_chunk( self._make_chunk_key(db_segment, chunk_number=chunk_number, quality=quality) ) + def remove_context_images_chunk(self, db_data: models.Data, frame_number: str) -> None: + self._delete_cache_item( + self._make_frame_context_images_chunk_key(db_data, frame_number=frame_number) + ) + + def remove_segments_chunks(self, params: Sequence[dict[str, Any]]) -> None: + """ + Removes several segment chunks from the cache. + + The function expects a sequence of remove_segment_chunk() parameters as dicts. + """ + # TODO: add a version of this function + # that removes related cache elements as well (context images, previews, ...) + # to provide encapsulation + + # TODO: add a generic bulk cleanup function for different objects, including related ones + # (likely a bulk key aggregator should be used inside to reduce requests count) + + keys_to_remove = [] + for item_params in params: + db_obj = item_params.pop("db_segment") + keys_to_remove.append(self._make_chunk_key(db_obj, **item_params)) + + self._bulk_delete_cache_items(keys_to_remove) + + def remove_context_images_chunks(self, params: Sequence[dict[str, Any]]) -> None: + """ + Removes several context image chunks from the cache. + + The function expects a sequence of remove_context_images_chunk() parameters as dicts. + """ + + keys_to_remove = [] + for item_params in params: + db_obj = item_params.pop("db_data") + keys_to_remove.append(self._make_frame_context_images_chunk_key(db_obj, **item_params)) + + self._bulk_delete_cache_items(keys_to_remove) + def get_cloud_preview(self, db_storage: models.CloudStorage) -> Optional[DataWithMime]: return self._to_data_with_mime(self._get_cache_item(self._make_preview_key(db_storage))) @@ -262,7 +516,11 @@ def get_or_set_cloud_preview(self, db_storage: models.CloudStorage) -> DataWithM return self._to_data_with_mime( self._get_or_set_cache_item( self._make_preview_key(db_storage), - create_callback=lambda: self._prepare_cloud_preview(db_storage), + Callback( + callable=self._prepare_cloud_preview, + args=[db_storage], + ), + cache_item_ttl=self._PREVIEW_TTL, ) ) @@ -271,13 +529,16 @@ def get_or_set_frame_context_images_chunk( ) -> DataWithMime: return self._to_data_with_mime( self._get_or_set_cache_item( - key=self._make_context_image_preview_key(db_data, frame_number), - create_callback=lambda: self.prepare_context_images_chunk(db_data, frame_number), + self._make_frame_context_images_chunk_key(db_data, frame_number), + Callback( + callable=self.prepare_context_images_chunk, + args=[db_data, frame_number], + ), ) ) + @staticmethod def _read_raw_images( - self, db_task: models.Task, frame_ids: Sequence[int], *, @@ -321,15 +582,13 @@ def _read_raw_images( cloud_storage_instance.bulk_download_to_dir( files=files_to_download, upload_dir=tmp_dir ) - media = preload_images(media) - for checksum, (_, fs_filename, _) in zip(checksums, media): - if checksum and not md5_hash(fs_filename) == checksum: + for checksum, media_item in zip(checksums, media): + if checksum and not md5_hash(media_item[1]) == checksum: slogger.cloud_storage[db_cloud_storage.id].warning( "Hash sums of files {} do not match".format(file_name) ) - - yield from media + yield load_image(media_item) else: requested_frame_iter = iter(frame_ids) next_requested_frame_id = next(requested_frame_iter, None) @@ -359,13 
+618,17 @@ def _read_raw_images( assert next_requested_frame_id is None if db_task.dimension == models.DimensionType.DIM_2D: - media = preload_images(media) + media = map(load_image, media) yield from media + @staticmethod def _read_raw_frames( - self, db_task: models.Task, frame_ids: Sequence[int] - ) -> Generator[Tuple[Union[av.VideoFrame, PIL.Image.Image], str, str], None, None]: + db_task: Union[models.Task, int], frame_ids: Sequence[int] + ) -> Generator[tuple[Union[av.VideoFrame, PIL.Image.Image], str, str], None, None]: + if isinstance(db_task, int): + db_task = models.Task.objects.get(pk=db_task) + for prev_frame, cur_frame in pairwise(frame_ids): assert ( prev_frame <= cur_frame @@ -402,11 +665,14 @@ def _read_raw_frames( for frame_tuple in reader.iterate_frames(frame_filter=frame_ids): yield frame_tuple else: - yield from self._read_raw_images(db_task, frame_ids, manifest_path=manifest_path) + yield from MediaCache._read_raw_images(db_task, frame_ids, manifest_path=manifest_path) def prepare_segment_chunk( - self, db_segment: models.Segment, chunk_number: int, *, quality: FrameQuality + self, db_segment: Union[models.Segment, int], chunk_number: int, *, quality: FrameQuality ) -> DataWithMime: + if isinstance(db_segment, int): + db_segment = models.Segment.objects.get(pk=db_segment) + if db_segment.type == models.SegmentType.RANGE: return self.prepare_range_segment_chunk(db_segment, chunk_number, quality=quality) elif db_segment.type == models.SegmentType.SPECIFIC_FRAMES: @@ -429,10 +695,11 @@ def prepare_range_segment_chunk( return self.prepare_custom_range_segment_chunk(db_task, chunk_frame_ids, quality=quality) + @classmethod def prepare_custom_range_segment_chunk( - self, db_task: models.Task, frame_ids: Sequence[int], *, quality: FrameQuality + cls, db_task: models.Task, frame_ids: Sequence[int], *, quality: FrameQuality ) -> DataWithMime: - with closing(self._read_raw_frames(db_task, frame_ids=frame_ids)) as frame_iter: + with closing(cls._read_raw_frames(db_task, frame_ids=frame_ids)) as frame_iter: return prepare_chunk(frame_iter, quality=quality, db_task=db_task) def prepare_masked_range_segment_chunk( @@ -450,15 +717,19 @@ def prepare_masked_range_segment_chunk( db_task, chunk_frame_ids, chunk_number, quality=quality ) + @classmethod def prepare_custom_masked_range_segment_chunk( - self, - db_task: models.Task, + cls, + db_task: Union[models.Task, int], frame_ids: Collection[int], chunk_number: int, *, quality: FrameQuality, insert_placeholders: bool = False, ) -> DataWithMime: + if isinstance(db_task, int): + db_task = models.Task.objects.get(pk=db_task) + db_data = db_task.data frame_step = db_data.get_frame_step() @@ -495,8 +766,8 @@ def prepare_custom_masked_range_segment_chunk( if not list(chunk_frames): continue - chunk_available = self._has_key( - self._make_chunk_key(db_segment, i, quality=quality) + chunk_available = cls._has_key( + cls._make_chunk_key(db_segment, i, quality=quality) ) available_chunks.append(chunk_available) @@ -523,7 +794,7 @@ def get_frames(): frame_range = frame_ids if not use_cached_data: - frames_gen = self._read_raw_frames(db_task, frame_ids) + frames_gen = cls._read_raw_frames(db_task, frame_ids) frames_iter = iter(es.enter_context(closing(frames_gen))) for abs_frame_idx in frame_range: @@ -571,7 +842,10 @@ def get_frames(): buff.seek(0) return buff, get_chunk_mime_type_for_writer(writer) - def _prepare_segment_preview(self, db_segment: models.Segment) -> DataWithMime: + def _prepare_segment_preview(self, db_segment: 
Union[models.Segment, int]) -> DataWithMime: + if isinstance(db_segment, int): + db_segment = models.Segment.objects.get(pk=db_segment) + if db_segment.task.dimension == models.DimensionType.DIM_3D: # TODO preview = PIL.Image.open( @@ -593,7 +867,10 @@ def _prepare_segment_preview(self, db_segment: models.Segment) -> DataWithMime: return prepare_preview_image(preview) - def _prepare_cloud_preview(self, db_storage: models.CloudStorage) -> DataWithMime: + def _prepare_cloud_preview(self, db_storage: Union[models.CloudStorage, int]) -> DataWithMime: + if isinstance(db_storage, int): + db_storage = models.CloudStorage.objects.get(pk=db_storage) + storage = db_storage_to_storage_instance(db_storage) if not db_storage.manifests.count(): raise ValidationError("Cannot get the cloud storage preview. There is no manifest file") @@ -633,7 +910,12 @@ def _prepare_cloud_preview(self, db_storage: models.CloudStorage) -> DataWithMim image = PIL.Image.open(buff) return prepare_preview_image(image) - def prepare_context_images_chunk(self, db_data: models.Data, frame_number: int) -> DataWithMime: + def prepare_context_images_chunk( + self, db_data: Union[models.Data, int], frame_number: int + ) -> DataWithMime: + if isinstance(db_data, int): + db_data = models.Data.objects.get(pk=db_data) + zip_buffer = io.BytesIO() related_images = db_data.related_files.filter(images__frame=frame_number).all() @@ -669,7 +951,7 @@ def prepare_preview_image(image: PIL.Image.Image) -> DataWithMime: def prepare_chunk( - task_chunk_frames: Iterator[Tuple[Any, str, int]], + task_chunk_frames: Iterator[tuple[Any, str, int]], *, quality: FrameQuality, db_task: models.Task, @@ -679,7 +961,7 @@ def prepare_chunk( db_data = db_task.data - writer_classes: dict[FrameQuality, Type[IChunkWriter]] = { + writer_classes: dict[FrameQuality, type[IChunkWriter]] = { FrameQuality.COMPRESSED: ( Mpeg4CompressedChunkWriter if db_data.compressed_chunk_type == models.DataChoice.VIDEO @@ -712,7 +994,7 @@ def prepare_chunk( return buffer, get_chunk_mime_type_for_writer(writer_class) -def get_chunk_mime_type_for_writer(writer: Union[IChunkWriter, Type[IChunkWriter]]) -> str: +def get_chunk_mime_type_for_writer(writer: Union[IChunkWriter, type[IChunkWriter]]) -> str: if isinstance(writer, IChunkWriter): writer_class = type(writer) else: diff --git a/cvat/apps/engine/cloud_provider.py b/cvat/apps/engine/cloud_provider.py index 80f907bd2e19..b810304d73f9 100644 --- a/cvat/apps/engine/cloud_provider.py +++ b/cvat/apps/engine/cloud_provider.py @@ -7,11 +7,12 @@ import json import os import math -from abc import ABC, abstractmethod, abstractproperty +from abc import ABC, abstractmethod +from collections.abc import Iterator +from concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION from enum import Enum from io import BytesIO -from typing import Dict, List, Optional, Any, Callable, TypeVar, Iterator -from concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION +from typing import Optional, Any, Callable, TypeVar import boto3 from azure.core.exceptions import HttpResponseError, ResourceExistsError @@ -21,7 +22,6 @@ from botocore.client import Config from botocore.exceptions import ClientError from botocore.handlers import disable_signing -from datumaro.util import take_by # can be changed to itertools.batched after migration to python3.12 from django.conf import settings from google.cloud import storage from google.cloud.exceptions import Forbidden as GoogleCloudForbidden @@ -32,7 +32,7 @@ from cvat.apps.engine.log import 
ServerLogManager from cvat.apps.engine.models import CloudProviderChoice, CredentialsTypeChoice -from cvat.apps.engine.utils import get_cpu_number +from cvat.apps.engine.utils import get_cpu_number, take_by from cvat.utils.http import PROXIES_FOR_UNTRUSTED_URLS class NamedBytesIO(BytesIO): @@ -136,7 +136,8 @@ class _CloudStorage(ABC): def __init__(self, prefix: Optional[str] = None): self.prefix = prefix - @abstractproperty + @property + @abstractmethod def name(self): pass @@ -233,7 +234,7 @@ def optimally_image_download(self, key: str, chunk_size: int = 65536) -> NamedBy def bulk_download_to_memory( self, - files: List[str], + files: list[str], *, threads_number: Optional[int] = None, _use_optimal_downloading: bool = True, @@ -242,12 +243,12 @@ def bulk_download_to_memory( threads_number = normalize_threads_number(threads_number, len(files)) with ThreadPoolExecutor(max_workers=threads_number) as executor: - for batch_links in take_by(files, count=threads_number): + for batch_links in take_by(files, chunk_size=threads_number): yield from executor.map(func, batch_links) def bulk_download_to_dir( self, - files: List[str], + files: list[str], upload_dir: str, *, threads_number: Optional[int] = None, @@ -275,7 +276,7 @@ def _list_raw_content_on_one_page( prefix: str = "", next_token: Optional[str] = None, page_size: int = settings.BUCKET_CONTENT_MAX_PAGE_SIZE, - ) -> Dict: + ) -> dict: pass def list_files_on_one_page( @@ -285,7 +286,7 @@ def list_files_on_one_page( page_size: int = settings.BUCKET_CONTENT_MAX_PAGE_SIZE, _use_flat_listing: bool = False, _use_sort: bool = False, - ) -> Dict: + ) -> dict: if self.prefix and prefix and not (self.prefix.startswith(prefix) or prefix.startswith(self.prefix)): return { @@ -338,7 +339,7 @@ def list_files( self, prefix: str = "", _use_flat_listing: bool = False, - ) -> List[str]: + ) -> list[str]: all_files = [] next_token = None while True: @@ -350,7 +351,8 @@ def list_files( return all_files - @abstractproperty + @property + @abstractmethod def supported_actions(self): pass @@ -366,7 +368,7 @@ def get_cloud_storage_instance( cloud_provider: CloudProviderChoice, resource: str, credentials: str, - specific_attributes: Optional[Dict[str, Any]] = None, + specific_attributes: Optional[dict[str, Any]] = None, ): instance = None if cloud_provider == CloudProviderChoice.AWS_S3: @@ -530,7 +532,7 @@ def _list_raw_content_on_one_page( prefix: str = "", next_token: Optional[str] = None, page_size: int = settings.BUCKET_CONTENT_MAX_PAGE_SIZE, - ) -> Dict: + ) -> dict: # The structure of response looks like this: # { # 'CommonPrefixes': [{'Prefix': 'sub/'}], @@ -737,7 +739,7 @@ def _list_raw_content_on_one_page( prefix: str = "", next_token: Optional[str] = None, page_size: int = settings.BUCKET_CONTENT_MAX_PAGE_SIZE, - ) -> Dict: + ) -> dict: page = self._client.walk_blobs( maxresults=page_size, results_per_page=page_size, delimiter='/', **({'name_starts_with': prefix} if prefix else {}) @@ -853,7 +855,7 @@ def _list_raw_content_on_one_page( prefix: str = "", next_token: Optional[str] = None, page_size: int = settings.BUCKET_CONTENT_MAX_PAGE_SIZE, - ) -> Dict: + ) -> dict: iterator = self._client.list_blobs( bucket_or_name=self.name, max_results=page_size, page_size=page_size, fields='items(name),nextPageToken,prefixes', # https://cloud.google.com/storage/docs/json_api/v1/parameters#fields diff --git a/cvat/apps/engine/default_settings.py b/cvat/apps/engine/default_settings.py index 826fe1c9bef2..f853d3bc8219 100644 --- a/cvat/apps/engine/default_settings.py +++ 
b/cvat/apps/engine/default_settings.py
@@ -2,9 +2,13 @@
 #
 # SPDX-License-Identifier: MIT

+import logging as log
 import os

 from attrs.converters import to_bool
+from django.core.exceptions import ImproperlyConfigured
+
+logger = log.getLogger("cvat")

 MEDIA_CACHE_ALLOW_STATIC_CACHE = to_bool(os.getenv("CVAT_ALLOW_STATIC_CACHE", False))
 """
@@ -14,3 +18,76 @@
 When enabled, this option can increase data access speed and reduce server load,
 but significantly increase disk space occupied by tasks.
 """
+
+CVAT_CHUNK_CREATE_TIMEOUT = 50
+"""
+Sets the chunk preparation timeout in seconds after which the backend will respond with 429 code.
+"""
+
+CVAT_CHUNK_CREATE_CHECK_INTERVAL = 0.2
+"""
+Sets the frequency of checking the readiness of the chunk
+"""
+default_export_cache_ttl = 60 * 60 * 24
+default_export_cache_lock_ttl = 30
+default_export_cache_lock_acquisition_timeout = 50
+default_export_locked_retry_interval = 60
+
+EXPORT_CACHE_TTL = os.getenv("CVAT_DATASET_CACHE_TTL")
+"Base lifetime for cached export files, in seconds"
+
+if EXPORT_CACHE_TTL is not None:
+    EXPORT_CACHE_TTL = int(EXPORT_CACHE_TTL)
+    logger.warning(
+        "The CVAT_DATASET_CACHE_TTL is deprecated, use CVAT_EXPORT_CACHE_TTL instead",
+    )
+else:
+    EXPORT_CACHE_TTL = int(os.getenv("CVAT_EXPORT_CACHE_TTL", default_export_cache_ttl))
+
+
+EXPORT_CACHE_LOCK_TTL = os.getenv("CVAT_DATASET_EXPORT_LOCK_TTL")
+"Default lifetime for the export cache lock, in seconds."
+
+if EXPORT_CACHE_LOCK_TTL is not None:
+    EXPORT_CACHE_LOCK_TTL = int(EXPORT_CACHE_LOCK_TTL)
+    logger.warning(
+        "The CVAT_DATASET_EXPORT_LOCK_TTL is deprecated, use CVAT_EXPORT_CACHE_LOCK_TTL instead",
+    )
+else:
+    EXPORT_CACHE_LOCK_TTL = int(
+        os.getenv("CVAT_EXPORT_CACHE_LOCK_TTL", default_export_cache_lock_ttl)
+    )
+
+EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT = os.getenv("CVAT_DATASET_CACHE_LOCK_TIMEOUT")
+"Timeout for cache lock acquiring, in seconds"
+
+if EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT is not None:
+    EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT = int(EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT)
+    logger.warning(
+        "The CVAT_DATASET_CACHE_LOCK_TIMEOUT is deprecated, "
+        "use CVAT_EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT instead",
+    )
+else:
+    EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT = int(
+        os.getenv(
+            "CVAT_EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT",
+            default_export_cache_lock_acquisition_timeout,
+        )
+    )
+
+if EXPORT_CACHE_LOCK_ACQUISITION_TIMEOUT <= EXPORT_CACHE_LOCK_TTL:
+    raise ImproperlyConfigured("Lock acquisition timeout must be more than lock TTL")
+
+EXPORT_LOCKED_RETRY_INTERVAL = os.getenv("CVAT_DATASET_EXPORT_LOCKED_RETRY_INTERVAL")
+"Retry interval for cases the export cache lock was unavailable, in seconds"
+
+if EXPORT_LOCKED_RETRY_INTERVAL is not None:
+    EXPORT_LOCKED_RETRY_INTERVAL = int(EXPORT_LOCKED_RETRY_INTERVAL)
+    logger.warning(
+        "The CVAT_DATASET_EXPORT_LOCKED_RETRY_INTERVAL is deprecated, "
+        "use CVAT_EXPORT_LOCKED_RETRY_INTERVAL instead",
+    )
+else:
+    EXPORT_LOCKED_RETRY_INTERVAL = int(
+        os.getenv("CVAT_EXPORT_LOCKED_RETRY_INTERVAL", default_export_locked_retry_interval)
+    )
diff --git a/cvat/apps/engine/field_validation.py b/cvat/apps/engine/field_validation.py
index bbfa58b5f3ea..e411284b3cde 100644
--- a/cvat/apps/engine/field_validation.py
+++ b/cvat/apps/engine/field_validation.py
@@ -2,7 +2,8 @@
 #
 # SPDX-License-Identifier: MIT

-from typing import Any, Sequence
+from collections.abc import Sequence
+from typing import Any

 from rest_framework import serializers
diff --git a/cvat/apps/engine/filters.py b/cvat/apps/engine/filters.py
index
663b6554e168..32355629d06d 100644 --- a/cvat/apps/engine/filters.py +++ b/cvat/apps/engine/filters.py @@ -3,8 +3,9 @@ # # SPDX-License-Identifier: MIT -from typing import Any, Dict, Tuple, List, Iterator, Optional, Iterable +from collections.abc import Iterator, Iterable from functools import reduce +from typing import Any, Optional import operator import json @@ -25,7 +26,7 @@ DEFAULT_FILTER_FIELDS_ATTR = 'filter_fields' DEFAULT_LOOKUP_MAP_ATTR = 'lookup_fields' -def get_lookup_fields(view, fields: Optional[Iterator[str]] = None) -> Dict[str, str]: +def get_lookup_fields(view, fields: Optional[Iterator[str]] = None) -> dict[str, str]: if fields is None: fields = getattr(view, DEFAULT_FILTER_FIELDS_ATTR, None) or [] @@ -134,7 +135,7 @@ def get_schema_operation_parameters(self, view): }] if ordering_fields else [] class JsonLogicFilter(filters.BaseFilterBackend): - Rules = Dict[str, Any] + Rules = dict[str, Any] filter_param = 'filter' filter_title = _('Filter') filter_description = _(dedent(""" @@ -191,7 +192,7 @@ def _parse_query(self, json_rules: str) -> Rules: return rules def apply_filter(self, - queryset: QuerySet, parsed_rules: Rules, *, lookup_fields: Dict[str, Any] + queryset: QuerySet, parsed_rules: Rules, *, lookup_fields: dict[str, Any] ) -> QuerySet: try: q_object = self._build_Q(parsed_rules, lookup_fields) @@ -362,7 +363,7 @@ class DotDict(dict): __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ - def __init__(self, dct: Dict): + def __init__(self, dct: dict): for key, value in dct.items(): if isinstance(value, dict): value = self.__class__(value) @@ -454,7 +455,7 @@ class NonModelOrderingFilter(OrderingFilter, _NestedAttributeHandler): ?sort=-field1,-field2 """ - def get_ordering(self, request, queryset, view) -> Tuple[List[str], bool]: + def get_ordering(self, request, queryset, view) -> tuple[list[str], bool]: ordering = super().get_ordering(request, queryset, view) result, reverse = [], False for field in ordering: diff --git a/cvat/apps/engine/frame_provider.py b/cvat/apps/engine/frame_provider.py index f397f0d568b1..6b756543c7f3 100644 --- a/cvat/apps/engine/frame_provider.py +++ b/cvat/apps/engine/frame_provider.py @@ -10,6 +10,8 @@ import math from abc import ABCMeta, abstractmethod from bisect import bisect +from collections import OrderedDict +from collections.abc import Iterator, Sequence from dataclasses import dataclass from enum import Enum, auto from io import BytesIO @@ -17,11 +19,7 @@ Any, Callable, Generic, - Iterator, Optional, - Sequence, - Tuple, - Type, TypeVar, Union, overload, @@ -30,13 +28,12 @@ import av import cv2 import numpy as np -from datumaro.util import take_by from django.conf import settings from PIL import Image from rest_framework.exceptions import ValidationError from cvat.apps.engine import models -from cvat.apps.engine.cache import DataWithMime, MediaCache, prepare_chunk +from cvat.apps.engine.cache import Callback, DataWithMime, MediaCache, prepare_chunk from cvat.apps.engine.media_extractors import ( FrameQuality, IMediaReader, @@ -45,6 +42,7 @@ ZipReader, ) from cvat.apps.engine.mime_types import mimetypes +from cvat.apps.engine.utils import take_by _T = TypeVar("_T") @@ -52,7 +50,7 @@ class _ChunkLoader(metaclass=ABCMeta): def __init__( self, - reader_class: Type[IMediaReader], + reader_class: type[IMediaReader], *, reader_params: Optional[dict] = None, ) -> None: @@ -61,7 +59,7 @@ def __init__( self.reader_class = reader_class self.reader_params = reader_params - def load(self, chunk_id: int) -> 
RandomAccessIterator[Tuple[Any, str, int]]: + def load(self, chunk_id: int) -> RandomAccessIterator[tuple[Any, str, int]]: if self.chunk_id != chunk_id: self.unload() @@ -87,7 +85,7 @@ def read_chunk(self, chunk_id: int) -> DataWithMime: ... class _FileChunkLoader(_ChunkLoader): def __init__( self, - reader_class: Type[IMediaReader], + reader_class: type[IMediaReader], get_chunk_path_callback: Callable[[int], str], *, reader_params: Optional[dict] = None, @@ -107,7 +105,7 @@ def read_chunk(self, chunk_id: int) -> DataWithMime: class _BufferChunkLoader(_ChunkLoader): def __init__( self, - reader_class: Type[IMediaReader], + reader_class: type[IMediaReader], get_chunk_callback: Callable[[int], DataWithMime], *, reader_params: Optional[dict] = None, @@ -153,7 +151,7 @@ def _av_frame_to_png_bytes(cls, av_frame: av.VideoFrame) -> BytesIO: return BytesIO(result.tobytes()) def _convert_frame( - self, frame: Any, reader_class: Type[IMediaReader], out_type: FrameOutputType + self, frame: Any, reader_class: type[IMediaReader], out_type: FrameOutputType ) -> AnyFrame: if out_type == FrameOutputType.BUFFER: return ( @@ -310,38 +308,60 @@ def get_chunk( # The requested frames match one of the job chunks, we can use it directly return segment_frame_provider.get_chunk(matching_chunk_index, quality=quality) - def _set_callback() -> DataWithMime: - # Create and return a joined / cleaned chunk - task_chunk_frames = {} - for db_segment in matching_segments: - segment_frame_provider = SegmentFrameProvider(db_segment) - segment_frame_set = db_segment.frame_set - - for task_chunk_frame_id in sorted(task_chunk_frame_set): - if ( - task_chunk_frame_id not in segment_frame_set - or task_chunk_frame_id in task_chunk_frames - ): - continue - - frame, frame_name, _ = segment_frame_provider._get_raw_frame( - self.get_rel_frame_number(task_chunk_frame_id), quality=quality - ) - task_chunk_frames[task_chunk_frame_id] = (frame, frame_name, None) - - return prepare_chunk( - task_chunk_frames.values(), - quality=quality, - db_task=self._db_task, - dump_unchanged=True, - ) - buffer, mime_type = cache.get_or_set_task_chunk( - self._db_task, chunk_number, quality=quality, set_callback=_set_callback + self._db_task, + chunk_number, + quality=quality, + set_callback=Callback( + callable=self._get_chunk_create_callback, + args=[ + self._db_task, + matching_segments, + {f: self.get_rel_frame_number(f) for f in task_chunk_frame_set}, + quality, + ], + ), ) return return_type(data=buffer, mime=mime_type) + @staticmethod + def _get_chunk_create_callback( + db_task: Union[models.Task, int], + matching_segments: list[models.Segment], + task_chunk_frames_with_rel_numbers: dict[int, int], + quality: FrameQuality, + ) -> DataWithMime: + # Create and return a joined / cleaned chunk + task_chunk_frames = OrderedDict() + for db_segment in matching_segments: + if isinstance(db_segment, int): + db_segment = models.Segment.objects.get(pk=db_segment) + segment_frame_provider = SegmentFrameProvider(db_segment) + segment_frame_set = db_segment.frame_set + + for task_chunk_frame_id in sorted(task_chunk_frames_with_rel_numbers.keys()): + if ( + task_chunk_frame_id not in segment_frame_set + or task_chunk_frame_id in task_chunk_frames + ): + continue + + frame, frame_name, _ = segment_frame_provider._get_raw_frame( + task_chunk_frames_with_rel_numbers[task_chunk_frame_id], quality=quality + ) + task_chunk_frames[task_chunk_frame_id] = (frame, frame_name, None) + + if isinstance(db_task, int): + db_task = models.Task.objects.get(pk=db_task) + + 
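        # This callback may be executed in an RQ worker process rather than in the
        # request handler, so its arguments are kept serializable: models can
        # arrive as primary keys and are re-fetched from the DB here before use
        # (see the isinstance checks in this function).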
return prepare_chunk( + task_chunk_frames.values(), + quality=quality, + db_task=db_task, + dump_unchanged=True, + ) + def get_frame( self, frame_number: int, @@ -369,7 +389,7 @@ def iterate_frames( quality: FrameQuality = FrameQuality.ORIGINAL, out_type: FrameOutputType = FrameOutputType.BUFFER, ) -> Iterator[DataWithMeta[AnyFrame]]: - frame_range = itertools.count(start_frame, self._db_task.data.get_frame_step()) + frame_range = itertools.count(start_frame) if stop_frame: frame_range = itertools.takewhile(lambda x: x <= stop_frame, frame_range) @@ -377,7 +397,10 @@ def iterate_frames( db_segment_frame_set = None db_segment_frame_provider = None for idx in frame_range: - if db_segment and idx not in db_segment_frame_set: + if ( + db_segment + and self._get_abs_frame_number(self._db_task.data, idx) not in db_segment_frame_set + ): db_segment = None db_segment_frame_set = None db_segment_frame_provider = None @@ -425,7 +448,7 @@ def __init__(self, db_segment: models.Segment) -> None: db_data = db_segment.task.data - reader_class: dict[models.DataChoice, Tuple[Type[IMediaReader], Optional[dict]]] = { + reader_class: dict[models.DataChoice, tuple[type[IMediaReader], Optional[dict]]] = { models.DataChoice.IMAGESET: (ZipReader, None), models.DataChoice.VIDEO: ( VideoReader, @@ -497,7 +520,7 @@ def get_frame_index(self, frame_number: int) -> Optional[int]: return frame_index - def validate_frame_number(self, frame_number: int) -> Tuple[int, int, int]: + def validate_frame_number(self, frame_number: int) -> tuple[int, int, int]: frame_index = self.get_frame_index(frame_number) if frame_index is None: raise ValidationError(f"Incorrect requested frame number: {frame_number}") @@ -550,7 +573,7 @@ def _get_raw_frame( frame_number: int, *, quality: FrameQuality = FrameQuality.ORIGINAL, - ) -> Tuple[Any, str, Type[IMediaReader]]: + ) -> tuple[Any, str, type[IMediaReader]]: _, chunk_number, frame_offset = self.validate_frame_number(frame_number) loader = self._loaders[quality] chunk_reader = loader.load(chunk_number) @@ -661,35 +684,55 @@ def get_chunk( if matching_chunk is not None: return self.get_chunk(matching_chunk, quality=quality) - def _set_callback() -> DataWithMime: - # Create and return a joined / cleaned chunk - segment_chunk_frame_ids = sorted( - task_chunk_frame_set.intersection(self._db_segment.frame_set) - ) - - if self._db_segment.type == models.SegmentType.RANGE: - return cache.prepare_custom_range_segment_chunk( - db_task=self._db_segment.task, - frame_ids=segment_chunk_frame_ids, - quality=quality, - ) - elif self._db_segment.type == models.SegmentType.SPECIFIC_FRAMES: - return cache.prepare_custom_masked_range_segment_chunk( - db_task=self._db_segment.task, - frame_ids=segment_chunk_frame_ids, - chunk_number=chunk_number, - quality=quality, - insert_placeholders=True, - ) - else: - assert False + segment_chunk_frame_ids = sorted( + task_chunk_frame_set.intersection(self._db_segment.frame_set) + ) buffer, mime_type = cache.get_or_set_segment_task_chunk( - self._db_segment, chunk_number, quality=quality, set_callback=_set_callback + self._db_segment, + chunk_number, + quality=quality, + set_callback=Callback( + callable=self._get_chunk_create_callback, + args=[ + self._db_segment, + segment_chunk_frame_ids, + chunk_number, + quality, + ], + ), ) return return_type(data=buffer, mime=mime_type) + @staticmethod + def _get_chunk_create_callback( + db_segment: Union[models.Segment, int], + segment_chunk_frame_ids: list[int], + chunk_number: int, + quality: FrameQuality, + ) -> 
DataWithMime: + # Create and return a joined / cleaned chunk + if isinstance(db_segment, int): + db_segment = models.Segment.objects.get(pk=db_segment) + + if db_segment.type == models.SegmentType.RANGE: + return MediaCache.prepare_custom_range_segment_chunk( + db_task=db_segment.task, + frame_ids=segment_chunk_frame_ids, + quality=quality, + ) + elif db_segment.type == models.SegmentType.SPECIFIC_FRAMES: + return MediaCache.prepare_custom_masked_range_segment_chunk( + db_task=db_segment.task, + frame_ids=segment_chunk_frame_ids, + chunk_number=chunk_number, + quality=quality, + insert_placeholders=True, + ) + else: + assert False + @overload def make_frame_provider(data_source: models.Job) -> JobFrameProvider: ... diff --git a/cvat/apps/engine/lazy_list.py b/cvat/apps/engine/lazy_list.py index 61d2c8956209..e8a36a09641f 100644 --- a/cvat/apps/engine/lazy_list.py +++ b/cvat/apps/engine/lazy_list.py @@ -2,9 +2,10 @@ # # SPDX-License-Identifier: MIT +from collections.abc import Iterator from functools import wraps from itertools import islice -from typing import Any, Callable, Iterator, TypeVar, overload +from typing import Any, Callable, TypeVar, overload import attrs from attr import field diff --git a/cvat/apps/engine/location.py b/cvat/apps/engine/location.py index ac6ab77dc073..c9e216e24627 100644 --- a/cvat/apps/engine/location.py +++ b/cvat/apps/engine/location.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: MIT from enum import Enum -from typing import Any, Dict, Union, Optional +from typing import Any, Union, Optional from cvat.apps.engine.models import Location, Project, Task, Job @@ -15,11 +15,11 @@ def __str__(self): return self.value def get_location_configuration( - query_params: Dict[str, Any], + query_params: dict[str, Any], field_name: str, *, db_instance: Optional[Union[Project, Task, Job]] = None, -) -> Dict[str, Any]: +) -> dict[str, Any]: location = query_params.get('location') # handle resource import diff --git a/cvat/apps/engine/management/__init__.py b/cvat/apps/engine/management/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/cvat/apps/engine/management/commands/__init__.py b/cvat/apps/engine/management/commands/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/cvat/apps/engine/management/commands/syncperiodicjobs.py b/cvat/apps/engine/management/commands/syncperiodicjobs.py new file mode 100644 index 000000000000..097f468b337f --- /dev/null +++ b/cvat/apps/engine/management/commands/syncperiodicjobs.py @@ -0,0 +1,76 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +from argparse import ArgumentParser +from collections import defaultdict + +from django.core.management.base import BaseCommand +from django.conf import settings + +import django_rq + +class Command(BaseCommand): + help = "Synchronize periodic jobs in Redis with the project configuration" + + _PERIODIC_JOBS_KEY_PREFIX = 'cvat:utils:periodic-jobs:' + + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument('--clear', action='store_true', help='Remove jobs from Redis instead of updating them') + + def handle(self, *args, **options): + configured_jobs = defaultdict(dict) + + if not options["clear"]: + for job in settings.PERIODIC_RQ_JOBS: + configured_jobs[job['queue']][job['id']] = job + + for queue_name in settings.RQ_QUEUES: + self.stdout.write(f"Processing queue {queue_name}...") + + periodic_jobs_key = self._PERIODIC_JOBS_KEY_PREFIX + queue_name + + queue = django_rq.get_queue(queue_name) 
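            # Entries of settings.PERIODIC_RQ_JOBS (gathered into configured_jobs
            # above) are expected to carry at least 'queue', 'id', 'func' and
            # 'cron_string' keys, e.g. (hypothetical values):
            #   {'queue': 'scheduled', 'id': 'cleanup', 'func': 'app.cron.cleanup', 'cron_string': '0 18 * * *'}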
+ scheduler = django_rq.get_scheduler(queue_name, queue=queue) + + stored_jobs_for_queue = { + member.decode('UTF-8') for member in queue.connection.smembers(periodic_jobs_key) + } + configured_jobs_for_queue = configured_jobs[queue_name] + + # Delete jobs that are no longer in the configuration + jobs_to_delete = stored_jobs_for_queue.difference(configured_jobs_for_queue.keys()) + + for job_id in jobs_to_delete: + self.stdout.write(f"Deleting job {job_id}...") + scheduler.cancel(job_id) + if job := queue.fetch_job(job_id): + job.delete() + + queue.connection.srem(periodic_jobs_key, job_id) + + # Add/update jobs from the configuration + for job_definition in configured_jobs_for_queue.values(): + job_id = job_definition['id'] + + if job := queue.fetch_job(job_id): + if ( + job.func_name == job_definition['func'] + and job.meta.get('cron_string') == job_definition['cron_string'] + ): + self.stdout.write(f"Job {job_id} is unchanged") + queue.connection.sadd(periodic_jobs_key, job_id) + continue + + self.stdout.write(f"Recreating job {job_id}...") + job.delete() + else: + self.stdout.write(f"Creating job {job_id}...") + + scheduler.cron( + cron_string=job_definition['cron_string'], + func=job_definition['func'], + id=job_id, + ) + + queue.connection.sadd(periodic_jobs_key, job_id) diff --git a/cvat/apps/engine/media_extractors.py b/cvat/apps/engine/media_extractors.py index 9c1d2deca189..ae1c7b9f7da8 100644 --- a/cvat/apps/engine/media_extractors.py +++ b/cvat/apps/engine/media_extractors.py @@ -15,13 +15,11 @@ import struct from abc import ABC, abstractmethod from bisect import bisect -from contextlib import ExitStack, closing, contextmanager +from collections.abc import Generator, Iterable, Iterator, Sequence +from contextlib import AbstractContextManager, ExitStack, closing, contextmanager from dataclasses import dataclass from enum import IntEnum -from typing import ( - Any, Callable, ContextManager, Generator, Iterable, Iterator, Optional, Protocol, - Sequence, Tuple, TypeVar, Union -) +from typing import Any, Callable, Optional, Protocol, TypeVar, Union import av import av.codec @@ -101,6 +99,12 @@ def image_size_within_orientation(img: Image.Image): def has_exif_rotation(img: Image.Image): return img.getexif().get(ORIENTATION_EXIF_TAG, ORIENTATION.NORMAL_HORIZONTAL) != ORIENTATION.NORMAL_HORIZONTAL + +def load_image(image: tuple[str, str, str])-> tuple[Image.Image, str, str]: + with Image.open(image[0]) as pil_img: + pil_img.load() + return pil_img, image[1], image[2] + _T = TypeVar("_T") @@ -539,7 +543,9 @@ def extract(self): class _AvVideoReading: @contextmanager - def read_av_container(self, source: Union[str, io.BytesIO]) -> av.container.InputContainer: + def read_av_container( + self, source: Union[str, io.BytesIO] + ) -> Generator[av.container.InputContainer, None, None]: if isinstance(source, io.BytesIO): source.seek(0) # required for re-reading @@ -552,6 +558,8 @@ def read_av_container(self, source: Union[str, io.BytesIO]) -> av.container.Inpu for stream in container.streams: context = stream.codec_context if context and context.is_open: + # Currently, context closing may get stuck on some videos for an unknown reason, + # so the thread_type == 'AUTO' setting is disabled for future investigation context.close() if container.open_files: @@ -583,7 +591,7 @@ def __init__( stop: Optional[int] = None, dimension: DimensionType = DimensionType.DIM_2D, *, - allow_threading: bool = True, + allow_threading: bool = False, ): super().__init__( source_path=source_path, @@ -602,7 +610,7 @@ 
def iterate_frames( *, frame_filter: Union[bool, Iterable[int]] = True, video_stream: Optional[av.video.stream.VideoStream] = None, - ) -> Iterator[Tuple[av.VideoFrame, str, int]]: + ) -> Iterator[tuple[av.VideoFrame, str, int]]: """ If provided, frame_filter must be an ordered sequence in ascending order. 'True' means using the frames configured in the reader object. @@ -635,6 +643,8 @@ def iterate_frames( if self.allow_threading: video_stream.thread_type = 'AUTO' + else: + video_stream.thread_type = 'NONE' frame_counter = itertools.count() with closing(self._decode_stream(container, video_stream)) as stream_decoder: @@ -661,14 +671,14 @@ def iterate_frames( if next_frame_filter_frame is None: return - def __iter__(self) -> Iterator[Tuple[av.VideoFrame, str, int]]: + def __iter__(self) -> Iterator[tuple[av.VideoFrame, str, int]]: return self.iterate_frames() def get_progress(self, pos): duration = self._get_duration() return pos / duration if duration else None - def _read_av_container(self) -> ContextManager[av.container.InputContainer]: + def _read_av_container(self) -> AbstractContextManager[av.container.InputContainer]: return _AvVideoReading().read_av_container(self._source_path[0]) def _decode_stream( @@ -759,7 +769,7 @@ def __init__(self, manifest_path: str, source_path: str, *, allow_threading: boo self.allow_threading = allow_threading - def _read_av_container(self) -> ContextManager[av.container.InputContainer]: + def _read_av_container(self) -> AbstractContextManager[av.container.InputContainer]: return _AvVideoReading().read_av_container(self.source_path) def _decode_stream( @@ -795,6 +805,8 @@ def iterate_frames(self, *, frame_filter: Iterable[int]) -> Iterable[av.VideoFra video_stream = container.streams.video[0] if self.allow_threading: video_stream.thread_type = 'AUTO' + else: + video_stream.thread_type = 'NONE' container.seek(offset=start_decode_timestamp, stream=video_stream) @@ -829,13 +841,15 @@ def _compress_image(source_image: av.VideoFrame | io.IOBase | Image.Image, quali if isinstance(source_image, av.VideoFrame): image = source_image.to_image() elif isinstance(source_image, io.IOBase): - with Image.open(source_image) as _img: - image = ImageOps.exif_transpose(_img) + image, _, _ = load_image((source_image, None, None)) elif isinstance(source_image, Image.Image): - image = ImageOps.exif_transpose(source_image) + image = source_image assert image is not None + if has_exif_rotation(image): + image = ImageOps.exif_transpose(image) + # Ensure image data fits into 8bit per pixel before RGB conversion as PIL clips values on conversion if image.mode == "I": # Image mode is 32bit integer pixels. @@ -860,16 +874,14 @@ def _compress_image(source_image: av.VideoFrame | io.IOBase | Image.Image, quali image = Image.fromarray(image, mode="L") # 'L' := Unsigned Integer 8, Grayscale image = ImageOps.equalize(image) # These images need equalization: high resolution with 16-bit depth, but only a small value range actually contains information
- converted_image = image.convert('RGB') + if image.mode != 'RGB' and image.mode != 'L': + image = image.convert('RGB') - try: - buf = io.BytesIO() - converted_image.save(buf, format='JPEG', quality=quality, optimize=True) - buf.seek(0) - width, height = converted_image.size - return width, height, buf - finally: - converted_image.close() + buf = io.BytesIO() + image.save(buf, format='JPEG', quality=quality, optimize=True) + buf.seek(0) + + return image.width, image.height, buf @abstractmethod def save_as_chunk(self, images, chunk_path): @@ -1018,11 +1030,11 @@ def _add_video_stream(self, container: av.container.OutputContainer, w, h, rate, return video_stream - FrameDescriptor = Tuple[av.VideoFrame, Any, Any] + FrameDescriptor = tuple[av.VideoFrame, Any, Any] def _peek_first_frame( self, frame_iter: Iterator[FrameDescriptor] - ) -> Tuple[Optional[FrameDescriptor], Iterator[FrameDescriptor]]: + ) -> tuple[Optional[FrameDescriptor], Iterator[FrameDescriptor]]: "Gets the first frame and returns the same full iterator" if not hasattr(frame_iter, '__next__'): @@ -1033,7 +1045,7 @@ def _peek_first_frame( def save_as_chunk( self, images: Iterator[FrameDescriptor], chunk_path: str - ) -> Sequence[Tuple[int, int]]: + ) -> Sequence[tuple[int, int]]: first_frame, images = self._peek_first_frame(images) if not first_frame: raise Exception('no images to save') diff --git a/cvat/apps/engine/migrations/0024_auto_20191023_1025.py b/cvat/apps/engine/migrations/0024_auto_20191023_1025.py index c8aefe7b7774..1946e08e47e2 100644 --- a/cvat/apps/engine/migrations/0024_auto_20191023_1025.py +++ b/cvat/apps/engine/migrations/0024_auto_20191023_1025.py @@ -79,7 +79,7 @@ def migrate_task_data(db_task_id, db_data_id, original_video, original_images, s compressed_chunk_path = os.path.join(compressed_cache_dir, '{}.zip'.format(chunk_idx)) compressed_chunk_writer.save_as_chunk(chunk_images, compressed_chunk_path) - preview = reader.get_preview() + preview = reader.get_preview(0) preview.save(os.path.join(db_data_dir, 'preview.jpeg')) else: original_chunk_writer = ZipChunkWriter(100) @@ -146,7 +146,7 @@ def migrate_task_data(db_task_id, db_data_id, original_video, original_images, s original_chunk_path = os.path.join(original_cache_dir, '{}.zip'.format(chunk_idx)) original_chunk_writer.save_as_chunk(chunk_images, original_chunk_path) - preview = reader.get_preview() + preview = reader.get_preview(0) preview.save(os.path.join(db_data_dir, 'preview.jpeg')) shutil.rmtree(old_db_task_dir) return_dict[db_task_id] = (True, '') diff --git a/cvat/apps/engine/migrations/0034_auto_20201125_1426.py b/cvat/apps/engine/migrations/0034_auto_20201125_1426.py index 457861a3942c..311b21655b9d 100644 --- a/cvat/apps/engine/migrations/0034_auto_20201125_1426.py +++ b/cvat/apps/engine/migrations/0034_auto_20201125_1426.py @@ -6,12 +6,12 @@ import django.db.models.deletion def create_profile(apps, schema_editor): - User = apps.get_model('auth', 'User') - Profile = apps.get_model('engine', 'Profile') - for user in User.objects.all(): - profile = Profile() - profile.user = user - profile.save() + User = apps.get_model('auth', 'User') + Profile = apps.get_model('engine', 'Profile') + for user in User.objects.all(): + profile = Profile() + profile.user = user + profile.save() class Migration(migrations.Migration): diff --git a/cvat/apps/engine/migrations/0038_manifest.py b/cvat/apps/engine/migrations/0038_manifest.py index ec96045ae69c..002a0326c2dc 100644 ---
a/cvat/apps/engine/migrations/0038_manifest.py +++ b/cvat/apps/engine/migrations/0038_manifest.py @@ -110,7 +110,7 @@ def migrate2manifest(apps, shema_editor): if db_data.storage == StorageChoice.SHARE: def _get_frame_step(str_): - match = search("step\s*=\s*([1-9]\d*)", str_) + match = search(r"step\s*=\s*([1-9]\d*)", str_) return int(match.group(1)) if match else 1 logger.info('Data is located on the share, metadata update has been started') manifest.step = _get_frame_step(db_data.frame_filter) diff --git a/cvat/apps/engine/migrations/0083_move_to_segment_chunks.py b/cvat/apps/engine/migrations/0083_move_to_segment_chunks.py index 8ef887d4c54b..4138d9295c87 100644 --- a/cvat/apps/engine/migrations/0083_move_to_segment_chunks.py +++ b/cvat/apps/engine/migrations/0083_move_to_segment_chunks.py @@ -1,8 +1,9 @@ # Generated by Django 4.2.13 on 2024-08-12 09:49 import os +from collections.abc import Iterable from itertools import islice -from typing import Iterable, TypeVar +from typing import TypeVar from django.db import migrations diff --git a/cvat/apps/engine/migrations/0084_honeypot_support.py b/cvat/apps/engine/migrations/0084_honeypot_support.py index 721d400ec386..fb44839c50bd 100644 --- a/cvat/apps/engine/migrations/0084_honeypot_support.py +++ b/cvat/apps/engine/migrations/0084_honeypot_support.py @@ -1,7 +1,7 @@ # Generated by Django 4.2.15 on 2024-09-23 13:11 -from typing import Collection from collections import defaultdict +from collections.abc import Collection import django.db.models.deletion from django.db import migrations, models diff --git a/cvat/apps/engine/migrations/0086_profile_has_analytics_access.py b/cvat/apps/engine/migrations/0086_profile_has_analytics_access.py new file mode 100644 index 000000000000..efd66a0fe230 --- /dev/null +++ b/cvat/apps/engine/migrations/0086_profile_has_analytics_access.py @@ -0,0 +1,35 @@ +# Generated by Django 4.2.16 on 2024-10-22 08:41 + +from django.conf import settings +from django.db import migrations, models + + +def set_has_analytics_access(apps, schema_editor): + User = apps.get_model('auth', 'User') + for user in User.objects.all(): + is_admin = user.groups.filter(name=settings.IAM_ADMIN_ROLE).exists() + user.profile.has_analytics_access = user.is_superuser or is_admin + user.profile.save() + + +class Migration(migrations.Migration): + + dependencies = [ + ("engine", "0085_segment_chunks_updated_date"), + ] + + operations = [ + migrations.AddField( + model_name="profile", + name="has_analytics_access", + field=models.BooleanField( + default=False, + help_text="Designates whether the user can access analytics.", + verbose_name="has access to analytics", + ), + ), + migrations.RunPython( + set_has_analytics_access, + reverse_code=migrations.RunPython.noop, + ), + ] diff --git a/cvat/apps/engine/mixins.py b/cvat/apps/engine/mixins.py index 3e48bf85327e..39f50ed31db4 100644 --- a/cvat/apps/engine/mixins.py +++ b/cvat/apps/engine/mixins.py @@ -8,12 +8,13 @@ import os import os.path import uuid +from collections.abc import Mapping from dataclasses import asdict, dataclass from pathlib import Path from tempfile import NamedTemporaryFile from unittest import mock from textwrap import dedent -from typing import Optional, Callable, Dict, Any, Mapping +from typing import Optional, Callable, Any from urllib.parse import urljoin import django_rq @@ -424,7 +425,7 @@ def export_dataset_v1( request, save_images: bool, *, - get_data: Optional[Callable[[int], Dict[str, Any]]] = None, + get_data: Optional[Callable[[int], dict[str, Any]]] = None, ) 
-> Response: if request.query_params.get("format"): callback = self.get_export_callback(save_images) diff --git a/cvat/apps/engine/models.py b/cvat/apps/engine/models.py index 647a0ada552a..c25c75404eaf 100644 --- a/cvat/apps/engine/models.py +++ b/cvat/apps/engine/models.py @@ -10,25 +10,28 @@ import re import shutil import uuid +from collections.abc import Collection, Sequence from enum import Enum from functools import cached_property -from typing import Any, ClassVar, Collection, Dict, Optional +from typing import Any, ClassVar, Optional from django.conf import settings from django.contrib.auth.models import User -from django.core.files.storage import FileSystemStorage from django.core.exceptions import ValidationError +from django.core.files.storage import FileSystemStorage from django.db import IntegrityError, models, transaction -from django.db.models.fields import FloatField from django.db.models import Q, TextChoices +from django.db.models.fields import FloatField +from django.utils.translation import gettext_lazy as _ from drf_spectacular.types import OpenApiTypes from drf_spectacular.utils import extend_schema_field from cvat.apps.engine.lazy_list import LazyList from cvat.apps.engine.model_utils import MaybeUndefined -from cvat.apps.engine.utils import parse_specific_attributes, chunked_list +from cvat.apps.engine.utils import parse_specific_attributes, take_by from cvat.apps.events.utils import cache_deleted + class SafeCharField(models.CharField): def get_prep_value(self, value): value = super().get_prep_value(value) @@ -274,6 +277,11 @@ class ValidationLayout(models.Model): disabled_frames = IntArrayField(store_sorted=True, unique_values=True) "Stores task frame numbers of the disabled (deleted) validation frames" + @property + def active_frames(self) -> Sequence[int]: + "An ordered sequence of active (non-disabled) validation frames" + return sorted(set(self.frames).difference(self.disabled_frames)) + class Data(models.Model): MANIFEST_FILENAME: ClassVar[str] = 'manifest.jsonl' @@ -424,7 +432,7 @@ def touch(self) -> None: @transaction.atomic(savepoint=False) def clear_annotations_in_jobs(job_ids): - for job_ids_chunk in chunked_list(job_ids, chunk_size=1000): + for job_ids_chunk in take_by(job_ids, chunk_size=1000): TrackedShapeAttributeVal.objects.filter(shape__track__job_id__in=job_ids_chunk).delete() TrackedShape.objects.filter(track__job_id__in=job_ids_chunk).delete() LabeledTrackAttributeVal.objects.filter(track__job_id__in=job_ids_chunk).delete() @@ -434,6 +442,30 @@ def clear_annotations_in_jobs(job_ids): LabeledImageAttributeVal.objects.filter(image__job_id__in=job_ids_chunk).delete() LabeledImage.objects.filter(job_id__in=job_ids_chunk).delete() +@transaction.atomic(savepoint=False) +def clear_annotations_on_frames_in_honeypot_task(db_task: Task, frames: Sequence[int]): + if db_task.data.validation_mode != ValidationMode.GT_POOL: + # Tracks are prohibited in honeypot tasks + raise AssertionError + + for frames_batch in take_by(frames, chunk_size=1000): + LabeledShapeAttributeVal.objects.filter( + shape__job_id__segment__task_id=db_task.id, + shape__frame__in=frames_batch, + ).delete() + LabeledShape.objects.filter( + job_id__segment__task_id=db_task.id, + frame__in=frames_batch, + ).delete() + LabeledImageAttributeVal.objects.filter( + image__job_id__segment__task_id=db_task.id, + image__frame__in=frames_batch, + ).delete() + LabeledImage.objects.filter( + job_id__segment__task_id=db_task.id, + frame__in=frames_batch, + ).delete() + class Project(TimestampedModel): name =
SafeCharField(max_length=256) owner = models.ForeignKey(User, null=True, blank=True, @@ -793,7 +825,7 @@ def update_or_create(self, *args, **kwargs: Any): return super().update_or_create(*args, **kwargs) - def _validate_constraints(self, obj: Dict[str, Any]): + def _validate_constraints(self, obj: dict[str, Any]): if 'type' not in obj: return @@ -1091,9 +1123,16 @@ class TrackedShapeAttributeVal(AttributeVal): shape = models.ForeignKey(TrackedShape, on_delete=models.DO_NOTHING, related_name='attributes', related_query_name='attribute') + class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) rating = models.FloatField(default=0.0) + has_analytics_access = models.BooleanField( + _("has access to analytics"), + default=False, + help_text=_("Designates whether the user can access analytics."), + ) + class Issue(TimestampedModel): frame = models.PositiveIntegerField() diff --git a/cvat/apps/engine/permissions.py b/cvat/apps/engine/permissions.py index d01036fc9004..c5ddd4799c4c 100644 --- a/cvat/apps/engine/permissions.py +++ b/cvat/apps/engine/permissions.py @@ -4,7 +4,8 @@ # SPDX-License-Identifier: MIT from collections import namedtuple -from typing import Any, Dict, List, Optional, Sequence, Union, cast +from collections.abc import Sequence +from typing import Any, Optional, Union, cast from django.shortcuts import get_object_or_404 from django.conf import settings @@ -21,7 +22,7 @@ from .models import AnnotationGuide, CloudStorage, Issue, Job, Label, Project, Task from cvat.apps.engine.utils import is_dataset_export -def _get_key(d: Dict[str, Any], key_path: Union[str, Sequence[str]]) -> Optional[Any]: +def _get_key(d: dict[str, Any], key_path: Union[str, Sequence[str]]) -> Optional[Any]: """ Like dict.get(), but supports nested fields. If the field is missing, returns None. 
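    For example (an illustrative call): _get_key({'user': {'id': 1}}, ('user', 'id')) returns 1,
    while _get_key({'user': {}}, ('user', 'id')) returns None.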
""" @@ -466,7 +467,7 @@ def __init__(self, **kwargs): self.url = settings.IAM_OPA_DATA_URL + '/tasks/allow' @staticmethod - def get_scopes(request, view, obj) -> List[Scopes]: + def get_scopes(request, view, obj) -> list[Scopes]: Scopes = __class__.Scopes scope = { ('list', 'GET'): Scopes.LIST, @@ -1191,7 +1192,7 @@ class Scopes(StrEnum): CANCEL = 'cancel' @classmethod - def create(cls, request, view, obj: Optional[RQJob], iam_context: Dict): + def create(cls, request, view, obj: Optional[RQJob], iam_context: dict): permissions = [] if view.basename == 'request': for scope in cls.get_scopes(request, view, obj): @@ -1207,7 +1208,7 @@ def __init__(self, **kwargs): self.url = settings.IAM_OPA_DATA_URL + '/requests/allow' @staticmethod - def get_scopes(request, view, obj) -> List[Scopes]: + def get_scopes(request, view, obj) -> list[Scopes]: Scopes = __class__.Scopes return [{ ('list', 'GET'): Scopes.LIST, diff --git a/cvat/apps/engine/rq_job_handler.py b/cvat/apps/engine/rq_job_handler.py index 25900fba20a9..c5b31336ecdc 100644 --- a/cvat/apps/engine/rq_job_handler.py +++ b/cvat/apps/engine/rq_job_handler.py @@ -6,12 +6,31 @@ import attrs -from typing import Optional, Union +from typing import Optional, Union, Any from uuid import UUID from rq.job import Job as RQJob from .models import RequestAction, RequestTarget, RequestSubresource +class RQMeta: + @staticmethod + def get_resettable_fields() -> list[RQJobMetaField]: + """Return a list of fields that must be reset on retry""" + return [ + RQJobMetaField.FORMATTED_EXCEPTION, + RQJobMetaField.PROGRESS, + RQJobMetaField.TASK_PROGRESS, + RQJobMetaField.STATUS + ] + + @classmethod + def reset_meta_on_retry(cls, meta_to_update: dict[RQJobMetaField, Any]) -> dict[RQJobMetaField, Any]: + resettable_fields = cls.get_resettable_fields() + + return { + k: v for k, v in meta_to_update.items() if k not in resettable_fields + } + class RQJobMetaField: # common fields FORMATTED_EXCEPTION = "formatted_exception" @@ -28,7 +47,8 @@ class RQJobMetaField: # export specific fields RESULT_URL = 'result_url' FUNCTION_ID = 'function_id' - + EXCEPTION_TYPE = 'exc_type' + EXCEPTION_ARGS = 'exc_args' def is_rq_job_owner(rq_job: RQJob, user_id: int) -> bool: return rq_job.meta.get(RQJobMetaField.USER, {}).get('id') == user_id diff --git a/cvat/apps/engine/rules/annotationguides.rego b/cvat/apps/engine/rules/annotationguides.rego index dd512af6d79a..6429eecb23a4 100644 --- a/cvat/apps/engine/rules/annotationguides.rego +++ b/cvat/apps/engine/rules/annotationguides.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/engine/rules/cloudstorages.rego b/cvat/apps/engine/rules/cloudstorages.rego index 3e278a35a7d5..04f8e0e45369 100644 --- a/cvat/apps/engine/rules/cloudstorages.rego +++ b/cvat/apps/engine/rules/cloudstorages.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/engine/rules/comments.rego b/cvat/apps/engine/rules/comments.rego index 019a5ebcecc4..9384d829b091 100644 --- a/cvat/apps/engine/rules/comments.rego +++ b/cvat/apps/engine/rules/comments.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": 
<"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/engine/rules/issues.rego b/cvat/apps/engine/rules/issues.rego index 803dab16c019..d8a487cbcdb1 100644 --- a/cvat/apps/engine/rules/issues.rego +++ b/cvat/apps/engine/rules/issues.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/engine/rules/jobs.rego b/cvat/apps/engine/rules/jobs.rego index 8068f7d6fdf9..7980a08d1bc0 100644 --- a/cvat/apps/engine/rules/jobs.rego +++ b/cvat/apps/engine/rules/jobs.rego @@ -12,7 +12,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/engine/rules/labels.rego b/cvat/apps/engine/rules/labels.rego index a50296377683..1d4344da7fe4 100644 --- a/cvat/apps/engine/rules/labels.rego +++ b/cvat/apps/engine/rules/labels.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/engine/rules/projects.rego b/cvat/apps/engine/rules/projects.rego index 8e40ddc43c8d..bdaabb120135 100644 --- a/cvat/apps/engine/rules/projects.rego +++ b/cvat/apps/engine/rules/projects.rego @@ -12,7 +12,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , @@ -59,19 +59,6 @@ allow if { organizations.has_perm(organizations.SUPERVISOR) } -allow if { - input.scope in {utils.CREATE, utils.IMPORT_BACKUP} - utils.is_sandbox - utils.has_perm(utils.BUSINESS) -} - -allow if { - input.scope in {utils.CREATE, utils.IMPORT_BACKUP} - input.auth.organization.id == input.resource.organization.id - utils.has_perm(utils.BUSINESS) - organizations.has_perm(organizations.SUPERVISOR) -} - allow if { input.scope == utils.LIST utils.is_sandbox diff --git a/cvat/apps/engine/rules/server.rego b/cvat/apps/engine/rules/server.rego index bfe3b47a0d46..6833826a0762 100644 --- a/cvat/apps/engine/rules/server.rego +++ b/cvat/apps/engine/rules/server.rego @@ -9,7 +9,7 @@ import data.utils # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/engine/rules/tasks.rego b/cvat/apps/engine/rules/tasks.rego index 99d126d2b443..f020cf4ac976 100644 --- a/cvat/apps/engine/rules/tasks.rego +++ b/cvat/apps/engine/rules/tasks.rego @@ -13,7 +13,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , @@ -93,19 +93,6 @@ allow if { organizations.has_perm(organizations.SUPERVISOR) } -allow if { - input.scope in {utils.CREATE, utils.IMPORT_BACKUP} - utils.is_sandbox - utils.has_perm(utils.BUSINESS) -} - -allow if { - input.scope in {utils.CREATE, utils.IMPORT_BACKUP} - input.auth.organization.id == input.resource.organization.id - utils.has_perm(utils.BUSINESS) - 
organizations.has_perm(organizations.SUPERVISOR) -} - allow if { input.scope == utils.CREATE_IN_PROJECT utils.is_sandbox @@ -128,20 +115,6 @@ allow if { is_project_staff } -allow if { - input.scope == utils.CREATE_IN_PROJECT - utils.is_sandbox - utils.has_perm(utils.BUSINESS) - is_project_staff -} - -allow if { - input.scope == utils.CREATE_IN_PROJECT - input.auth.organization.id == input.resource.organization.id - utils.has_perm(utils.BUSINESS) - organizations.has_perm(organizations.SUPERVISOR) -} - allow if { input.scope == utils.LIST utils.is_sandbox diff --git a/cvat/apps/engine/rules/tests/generators/annotationguides_test.gen.rego.py b/cvat/apps/engine/rules/tests/generators/annotationguides_test.gen.rego.py index 4cf562741677..1dbfcc1167f5 100644 --- a/cvat/apps/engine/rules/tests/generators/annotationguides_test.gen.rego.py +++ b/cvat/apps/engine/rules/tests/generators/annotationguides_test.gen.rego.py @@ -46,7 +46,7 @@ def read_rules(name): "job:assignee", "none", ] -GROUPS = ["admin", "business", "user", "worker"] +GROUPS = ["admin", "user", "worker"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [True, False] diff --git a/cvat/apps/engine/rules/tests/generators/cloudstorages_test.gen.rego.py b/cvat/apps/engine/rules/tests/generators/cloudstorages_test.gen.rego.py index 63460df540b2..4a4941e0fd1c 100644 --- a/cvat/apps/engine/rules/tests/generators/cloudstorages_test.gen.rego.py +++ b/cvat/apps/engine/rules/tests/generators/cloudstorages_test.gen.rego.py @@ -41,7 +41,7 @@ def read_rules(name): SCOPES = {rule["scope"] for rule in simple_rules} CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["owner", "none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [False, True] diff --git a/cvat/apps/engine/rules/tests/generators/comments_test.gen.rego.py b/cvat/apps/engine/rules/tests/generators/comments_test.gen.rego.py index f36c8a7dfa0d..a13a1897c66a 100644 --- a/cvat/apps/engine/rules/tests/generators/comments_test.gen.rego.py +++ b/cvat/apps/engine/rules/tests/generators/comments_test.gen.rego.py @@ -51,7 +51,7 @@ def read_rules(name): "owner", "none", ] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [True, False] HAS_PROJ = [True, False] diff --git a/cvat/apps/engine/rules/tests/generators/issues_test.gen.rego.py b/cvat/apps/engine/rules/tests/generators/issues_test.gen.rego.py index 0a35d83880eb..53213eb39d2d 100644 --- a/cvat/apps/engine/rules/tests/generators/issues_test.gen.rego.py +++ b/cvat/apps/engine/rules/tests/generators/issues_test.gen.rego.py @@ -50,7 +50,7 @@ def read_rules(name): "assignee", "none", ] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [True, False] HAS_PROJ = [True, False] diff --git a/cvat/apps/engine/rules/tests/generators/jobs_test.gen.rego.py b/cvat/apps/engine/rules/tests/generators/jobs_test.gen.rego.py index ca799f953cd3..e36f8c8ec7be 100644 --- a/cvat/apps/engine/rules/tests/generators/jobs_test.gen.rego.py +++ b/cvat/apps/engine/rules/tests/generators/jobs_test.gen.rego.py @@ -50,7 +50,7 @@ def read_rules(name): "assignee", "none", ] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", 
"worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [True, False] diff --git a/cvat/apps/engine/rules/tests/generators/projects_test.gen.rego.py b/cvat/apps/engine/rules/tests/generators/projects_test.gen.rego.py index 6657f21d2994..d4a7259893fc 100644 --- a/cvat/apps/engine/rules/tests/generators/projects_test.gen.rego.py +++ b/cvat/apps/engine/rules/tests/generators/projects_test.gen.rego.py @@ -41,7 +41,7 @@ def read_rules(name): SCOPES = {rule["scope"] for rule in simple_rules} CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["owner", "assignee", "none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [False, True] diff --git a/cvat/apps/engine/rules/tests/generators/server_test.gen.rego.py b/cvat/apps/engine/rules/tests/generators/server_test.gen.rego.py index 8e9b57a814d8..c2b4195191a4 100644 --- a/cvat/apps/engine/rules/tests/generators/server_test.gen.rego.py +++ b/cvat/apps/engine/rules/tests/generators/server_test.gen.rego.py @@ -41,7 +41,7 @@ def read_rules(name): SCOPES = {rule["scope"] for rule in simple_rules} CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] diff --git a/cvat/apps/engine/rules/tests/generators/tasks_test.gen.rego.py b/cvat/apps/engine/rules/tests/generators/tasks_test.gen.rego.py index 61da5c8520de..30925fcee18b 100644 --- a/cvat/apps/engine/rules/tests/generators/tasks_test.gen.rego.py +++ b/cvat/apps/engine/rules/tests/generators/tasks_test.gen.rego.py @@ -43,7 +43,7 @@ def read_rules(name): SCOPES = list({rule["scope"] for rule in simple_rules}) CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["project:owner", "project:assignee", "owner", "assignee", "none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [True, False] diff --git a/cvat/apps/engine/rules/tests/generators/users_test.gen.rego.py b/cvat/apps/engine/rules/tests/generators/users_test.gen.rego.py index 595cbaae4ee4..a609492868f6 100644 --- a/cvat/apps/engine/rules/tests/generators/users_test.gen.rego.py +++ b/cvat/apps/engine/rules/tests/generators/users_test.gen.rego.py @@ -41,7 +41,7 @@ def read_rules(name): SCOPES = {rule["scope"] for rule in simple_rules} CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["self", "none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] diff --git a/cvat/apps/engine/rules/users.rego b/cvat/apps/engine/rules/users.rego index 63469228e11a..34cb0f4866d3 100644 --- a/cvat/apps/engine/rules/users.rego +++ b/cvat/apps/engine/rules/users.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/engine/schema.py b/cvat/apps/engine/schema.py index 5931381b403d..f3914a03dddd 100644 --- a/cvat/apps/engine/schema.py +++ b/cvat/apps/engine/schema.py @@ -3,7 +3,6 @@ # SPDX-License-Identifier: MIT import textwrap -from typing import Type from drf_spectacular.extensions 
import OpenApiSerializerExtension from drf_spectacular.plumbing import build_basic_type, force_instance @@ -15,7 +14,7 @@ def _copy_serializer( instance: serializers.Serializer, *, - _new_type: Type[serializers.Serializer] = None, + _new_type: type[serializers.Serializer] = None, **kwargs ) -> serializers.Serializer: _new_type = _new_type or type(instance) diff --git a/cvat/apps/engine/serializers.py b/cvat/apps/engine/serializers.py index 6b858ceb25e4..9f772cd24e6d 100644 --- a/cvat/apps/engine/serializers.py +++ b/cvat/apps/engine/serializers.py @@ -3,39 +3,47 @@ # # SPDX-License-Identifier: MIT +from __future__ import annotations + +from collections import OrderedDict +from collections.abc import Iterable, Sequence from contextlib import closing import warnings from copy import copy +from datetime import timedelta +from decimal import Decimal from inspect import isclass import os import re import shutil import string -import rq.defaults as rq_defaults - from tempfile import NamedTemporaryFile import textwrap -from typing import Any, Dict, Iterable, Optional, OrderedDict, Union - -from rq.job import Job as RQJob, JobStatus as RQJobStatus -from datetime import timedelta -from decimal import Decimal +from typing import Any, Optional, Union -from rest_framework import serializers, exceptions +import django_rq +from django.conf import settings from django.contrib.auth.models import User, Group from django.db import transaction +from django.db.models import prefetch_related_objects, Prefetch from django.utils import timezone from numpy import random +from rest_framework import serializers, exceptions +import rq.defaults as rq_defaults +from rq.job import Job as RQJob, JobStatus as RQJobStatus from cvat.apps.dataset_manager.formats.utils import get_label_color -from cvat.apps.engine.frame_provider import TaskFrameProvider -from cvat.apps.engine.utils import format_list, parse_exception_message from cvat.apps.engine import field_validation, models +from cvat.apps.engine.frame_provider import TaskFrameProvider, FrameQuality from cvat.apps.engine.cloud_provider import get_cloud_storage_instance, Credentials, Status from cvat.apps.engine.log import ServerLogManager from cvat.apps.engine.permissions import TaskPermission -from cvat.apps.engine.utils import parse_specific_attributes, build_field_filter_params, get_list_view_name, reverse +from cvat.apps.engine.task_validation import HoneypotFrameSelector from cvat.apps.engine.rq_job_handler import RQJobMetaField, RQId +from cvat.apps.engine.utils import ( + format_list, grouped, parse_exception_message, CvatChunkTimestampMismatchError, + parse_specific_attributes, build_field_filter_params, get_list_view_name, reverse, take_by +) from drf_spectacular.utils import OpenApiExample, extend_schema_field, extend_schema_serializer @@ -223,21 +231,28 @@ class Meta: model = User fields = ('url', 'id', 'username', 'first_name', 'last_name') + class UserSerializer(serializers.ModelSerializer): groups = serializers.SlugRelatedField(many=True, slug_field='name', queryset=Group.objects.all()) + has_analytics_access = serializers.BooleanField( + source='profile.has_analytics_access', + required=False, + read_only=True, + ) class Meta: model = User fields = ('url', 'id', 'username', 'first_name', 'last_name', 'email', 'groups', 'is_staff', 'is_superuser', 'is_active', 'last_login', - 'date_joined') - read_only_fields = ('last_login', 'date_joined') + 'date_joined', 'has_analytics_access') + read_only_fields = ('last_login', 'date_joined', 'has_analytics_access') 
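        # Illustrative serialized user fragment after this change (values are hypothetical):
        #   {"username": "admin", "is_superuser": true, "has_analytics_access": true}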
write_only_fields = ('password', ) extra_kwargs = { 'last_login': { 'allow_null': True } } + class DelimitedStringListField(serializers.ListField): def to_representation(self, value): return super().to_representation(value.split('\n')) @@ -245,6 +260,7 @@ def to_representation(self, value): def to_internal_value(self, data): return '\n'.join(super().to_internal_value(data)) + class AttributeSerializer(serializers.ModelSerializer): id = serializers.IntegerField(required=False) values = DelimitedStringListField(allow_empty=True, @@ -255,6 +271,7 @@ class Meta: model = models.AttributeSpec fields = ('id', 'name', 'mutable', 'input_type', 'default_value', 'values') + class SublabelSerializer(serializers.ModelSerializer): id = serializers.IntegerField(required=False) attributes = AttributeSerializer(many=True, source='attributespec_set', default=[], @@ -273,6 +290,7 @@ class Meta: fields = ('id', 'name', 'color', 'attributes', 'type', 'has_parent', ) read_only_fields = ('parent',) + class SkeletonSerializer(serializers.ModelSerializer): id = serializers.IntegerField(required=False) svg = serializers.CharField(allow_blank=True, required=False) @@ -281,6 +299,7 @@ class Meta: model = models.Skeleton fields = ('id', 'svg',) + class LabelSerializer(SublabelSerializer): deleted = serializers.BooleanField(required=False, write_only=True, help_text='Delete the label. Only applicable in the PATCH methods of a project or a task.') @@ -350,9 +369,9 @@ def check_attribute_names_unique(attrs): @transaction.atomic def update_label( cls, - validated_data: Dict[str, Any], + validated_data: dict[str, Any], svg: str, - sublabels: Iterable[Dict[str, Any]], + sublabels: Iterable[dict[str, Any]], *, parent_instance: Union[models.Project, models.Task], parent_label: Optional[models.Label] = None @@ -466,7 +485,7 @@ def update_label( @classmethod @transaction.atomic def create_labels(cls, - labels: Iterable[Dict[str, Any]], + labels: Iterable[dict[str, Any]], *, parent_instance: Union[models.Project, models.Task], parent_label: Optional[models.Label] = None @@ -517,7 +536,7 @@ def create_labels(cls, @classmethod @transaction.atomic def update_labels(cls, - labels: Iterable[Dict[str, Any]], + labels: Iterable[dict[str, Any]], *, parent_instance: Union[models.Project, models.Task], parent_label: Optional[models.Label] = None @@ -947,6 +966,13 @@ class JobValidationLayoutWriteSerializer(serializers.Serializer): """.format(models.JobFrameSelectionMethod.MANUAL)) ) + def __init__( + self, *args, bulk_context: _TaskValidationLayoutBulkUpdateContext | None = None, **kwargs + ): + super().__init__(*args, **kwargs) + + self._bulk_context = bulk_context + def validate(self, attrs): frame_selection_method = attrs["frame_selection_method"] if frame_selection_method == models.JobFrameSelectionMethod.MANUAL: @@ -969,9 +995,10 @@ def validate(self, attrs): @transaction.atomic def update(self, instance: models.Job, validated_data: dict[str, Any]) -> models.Job: - from cvat.apps.engine.cache import MediaCache - from cvat.apps.engine.frame_provider import FrameQuality, JobFrameProvider, prepare_chunk - from cvat.apps.dataset_manager.task import JobAnnotation, AnnotationManager + from cvat.apps.engine.cache import ( + MediaCache, Callback, enqueue_create_chunk_job, wait_for_rq_job + ) + from cvat.apps.engine.frame_provider import JobFrameProvider db_job = instance db_segment = db_job.segment @@ -997,22 +1024,30 @@ def update(self, instance: models.Job, validated_data: dict[str, Any]) -> models def _to_rel_frame(abs_frame: int) -> int: 
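            # Worked example (hypothetical values): with start_frame=2 and frame_step=3,
            # absolute frame 8 maps to relative frame (8 - 2) // 3 == 2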
return (abs_frame - db_data.start_frame) // frame_step - all_task_frames: dict[int, models.Image] = { - _to_rel_frame(frame.frame): frame - for frame in db_data.images.all() - } - task_honeypot_frames = set( - _to_rel_frame(frame_id) - for frame_id, frame in all_task_frames.items() - if frame.is_placeholder - ) - segment_frame_set = set(map(_to_rel_frame, db_segment.frame_set)) - segment_honeypots = sorted(segment_frame_set & task_honeypot_frames) + def _to_abs_frame(rel_frame: int) -> int: + return rel_frame * frame_step + db_data.start_frame - deleted_task_frames = db_data.deleted_frames - task_all_validation_frames = set(map(_to_rel_frame, db_task.gt_job.segment.frame_set)) - task_active_validation_frames = task_all_validation_frames.difference(deleted_task_frames) + bulk_context = self._bulk_context + if bulk_context: + db_frames = bulk_context.all_db_frames + task_honeypot_frames = set(bulk_context.honeypot_frames) + task_all_validation_frames = set(bulk_context.all_validation_frames) + task_active_validation_frames = set(bulk_context.active_validation_frames) + else: + db_frames: dict[int, models.Image] = { + _to_rel_frame(frame.frame): frame + for frame in db_data.images.all() + } + task_honeypot_frames = set( + _to_rel_frame(frame_id) + for frame_id, frame in db_frames.items() + if frame.is_placeholder + ) + task_all_validation_frames = set(db_data.validation_layout.frames) + task_active_validation_frames = set(db_data.validation_layout.active_frames) + segment_frame_set = set(map(_to_rel_frame, db_segment.frame_set)) + segment_honeypots = sorted(segment_frame_set & task_honeypot_frames) segment_honeypots_count = len(segment_honeypots) frame_selection_method = validated_data['frame_selection_method'] @@ -1065,69 +1100,73 @@ def _to_rel_frame(abs_frame: int) -> int: ) ) - # Guarantee uniformness by using a known distribution - # overall task honeypot distribution is not guaranteed though - rng = random.Generator(random.MT19937()) - requested_frames = rng.choice( - tuple(task_active_validation_frames), size=segment_honeypots_count, - shuffle=False, replace=False - ).tolist() + if bulk_context: + active_validation_frame_counts = bulk_context.active_validation_frame_counts + else: + active_validation_frame_counts = { + validation_frame: 0 for validation_frame in task_active_validation_frames + } + for task_honeypot_frame in task_honeypot_frames: + real_frame = _to_rel_frame(db_frames[task_honeypot_frame].real_frame) + if real_frame in task_active_validation_frames: + active_validation_frame_counts[real_frame] += 1 + + frame_selector = HoneypotFrameSelector(active_validation_frame_counts) + requested_frames = frame_selector.select_next_frames(segment_honeypots_count) + requested_frames = list(map(_to_abs_frame, requested_frames)) else: assert False # Replace validation frames in the job - old_honeypot_real_ids = [] - updated_db_frames = [] + updated_honeypots = {} for frame, requested_validation_frame in zip(segment_honeypots, requested_frames): - db_requested_frame = all_task_frames[requested_validation_frame] - db_segment_frame = all_task_frames[frame] + db_requested_frame = db_frames[requested_validation_frame] + db_segment_frame = db_frames[frame] assert db_segment_frame.is_placeholder - old_honeypot_real_ids.append(_to_rel_frame(db_segment_frame.real_frame)) + if db_segment_frame.real_frame == db_requested_frame.frame: + continue # Change image in the current segment honeypot frame + db_segment_frame.real_frame = db_requested_frame.frame + db_segment_frame.path = 
db_requested_frame.path db_segment_frame.width = db_requested_frame.width db_segment_frame.height = db_requested_frame.height - db_segment_frame.real_frame = db_requested_frame.frame - db_segment_frame.related_files.set(db_requested_frame.related_files.all()) - updated_db_frames.append(db_segment_frame) + updated_honeypots[frame] = db_segment_frame - updated_validation_frames = [ - frame - for new_validation_frame, old_validation_frame, frame in zip( - requested_frames, old_honeypot_real_ids, segment_honeypots - ) - if new_validation_frame != old_validation_frame - ] - if updated_validation_frames: - models.Image.objects.bulk_update( - updated_db_frames, fields=['path', 'width', 'height', 'real_frame'] - ) + if updated_honeypots: + if bulk_context: + bulk_context.updated_honeypots.update(updated_honeypots) + else: + # Update image infos + models.Image.objects.bulk_update( + updated_honeypots.values(), fields=['path', 'width', 'height', 'real_frame'] + ) - # Remove annotations on changed validation frames - job_annotation = JobAnnotation(db_job.id) - job_annotation.init_from_db() - job_annotation_manager = AnnotationManager( - job_annotation.ir_data, dimension=db_task.dimension - ) - job_annotation_manager.clear_frames( - segment_frame_set.difference(updated_validation_frames) - ) - job_annotation.delete(job_annotation_manager.data) + models.RelatedFile.images.through.objects.filter( + image_id__in=updated_honeypots + ).delete() + + for updated_honeypot in updated_honeypots.values(): + validation_frame = db_frames[_to_rel_frame(updated_honeypot.real_frame)] + updated_honeypot.related_files.set(validation_frame.related_files.all()) + + # Remove annotations on changed validation frames + self._clear_annotations_on_frames(db_segment, updated_honeypots) # Update chunks - task_frame_provider = TaskFrameProvider(db_task) job_frame_provider = JobFrameProvider(db_job) updated_segment_chunk_ids = set( job_frame_provider.get_chunk_number(updated_segment_frame_id) - for updated_segment_frame_id in updated_validation_frames + for updated_segment_frame_id in updated_honeypots ) segment_frames = sorted(segment_frame_set) segment_frame_map = dict(zip(segment_honeypots, requested_frames)) - media_cache = MediaCache() + chunks_to_be_removed = [] + queue = django_rq.get_queue(settings.CVAT_QUEUES.CHUNKS.value) for chunk_id in sorted(updated_segment_chunk_ids): chunk_frames = segment_frames[ chunk_id * db_data.chunk_size : @@ -1135,59 +1174,128 @@ def _to_rel_frame(abs_frame: int) -> int: ] for quality in FrameQuality.__members__.values(): - def _write_updated_static_chunk(): - def _iterate_chunk_frames(): - for chunk_frame in chunk_frames: - db_frame = all_task_frames[chunk_frame] - chunk_real_frame = segment_frame_map.get(chunk_frame, chunk_frame) - yield ( - task_frame_provider.get_frame( - chunk_real_frame, quality=quality - ).data, - os.path.basename(db_frame.path), - chunk_frame, - ) - - with closing(_iterate_chunk_frames()) as frame_iter: - chunk, _ = prepare_chunk( - frame_iter, quality=quality, db_task=db_task, dump_unchanged=True, - ) - - get_chunk_path = { - FrameQuality.COMPRESSED: db_data.get_compressed_segment_chunk_path, - FrameQuality.ORIGINAL: db_data.get_original_segment_chunk_path, - }[quality] - - with open(get_chunk_path(chunk_id, db_segment.id), 'wb') as f: - f.write(chunk.getvalue()) - if db_data.storage_method == models.StorageMethodChoice.FILE_SYSTEM: - _write_updated_static_chunk() + rq_id = f"segment_{db_segment.id}_write_chunk_{chunk_id}_{quality}" + rq_job = 
enqueue_create_chunk_job( + queue=queue, + rq_job_id=rq_id, + create_callback=Callback( + callable=self._write_updated_static_chunk, + args=[ + db_segment.id, + chunk_id, + chunk_frames, + quality, + { + chunk_frame: db_frames[chunk_frame].path + for chunk_frame in chunk_frames + }, + segment_frame_map, + ], + ), + ) + wait_for_rq_job(rq_job) + + chunks_to_be_removed.append( + {'db_segment': db_segment, 'chunk_number': chunk_id, 'quality': quality} + ) + + context_image_chunks_to_be_removed = [ + {"db_data": db_data, "frame_number": f} for f in updated_honeypots + ] - media_cache.remove_segment_chunk(db_segment, chunk_id, quality=quality) + if bulk_context: + bulk_context.chunks_to_be_removed.extend(chunks_to_be_removed) + bulk_context.context_image_chunks_to_be_removed.extend( + context_image_chunks_to_be_removed + ) + bulk_context.segments_with_updated_chunks.append(db_segment.id) + else: + media_cache = MediaCache() + media_cache.remove_segments_chunks(chunks_to_be_removed) + media_cache.remove_context_images_chunks(context_image_chunks_to_be_removed) - db_segment.chunks_updated_date = timezone.now() - db_segment.save(update_fields=['chunks_updated_date']) + db_segment.chunks_updated_date = timezone.now() + db_segment.save(update_fields=['chunks_updated_date']) - if updated_validation_frames or ( + if updated_honeypots or ( # even if the randomly selected frames were the same as before, we should still # consider it an update to the validation frames and restore them, if they were deleted frame_selection_method == models.JobFrameSelectionMethod.RANDOM_UNIFORM ): - if set(deleted_task_frames).intersection(updated_validation_frames): + # deleted frames that were updated in the job should be restored, as they are new now + if set(db_data.deleted_frames).intersection(updated_honeypots): db_data.deleted_frames = sorted( - set(deleted_task_frames).difference(updated_validation_frames) + set(db_data.deleted_frames).difference(updated_honeypots) ) db_data.save(update_fields=['deleted_frames']) - db_job.touch() - db_segment.job_set.exclude(id=db_job.id).update(updated_date=timezone.now()) - db_task.touch() - if db_task.project: - db_task.project.touch() + new_updated_date = timezone.now() + db_job.updated_date = new_updated_date + + if bulk_context: + bulk_context.updated_segments.append(db_segment.id) + else: + db_segment.job_set.update(updated_date=new_updated_date) + + db_task.touch() + if db_task.project: + db_task.project.touch() return instance + def _clear_annotations_on_frames(self, segment: models.Segment, frames: Sequence[int]): + models.clear_annotations_on_frames_in_honeypot_task(segment.task, frames=frames) + + @staticmethod + def _write_updated_static_chunk( + db_segment_id: int, + chunk_id: int, + chunk_frames: list[int], + quality: FrameQuality, + frame_path_map: dict[int, str], + segment_frame_map: dict[int,int], + ): + from cvat.apps.engine.frame_provider import prepare_chunk + + db_segment = models.Segment.objects.select_related("task").get(pk=db_segment_id) + initial_chunks_updated_date = db_segment.chunks_updated_date + db_task = db_segment.task + task_frame_provider = TaskFrameProvider(db_task) + db_data = db_task.data + + def _iterate_chunk_frames(): + for chunk_frame in chunk_frames: + db_frame_path = frame_path_map[chunk_frame] + chunk_real_frame = segment_frame_map.get(chunk_frame, chunk_frame) + yield ( + task_frame_provider.get_frame( + chunk_real_frame, quality=quality + ).data, + os.path.basename(db_frame_path), + chunk_frame, + ) + + with 
closing(_iterate_chunk_frames()) as frame_iter: + chunk, _ = prepare_chunk( + frame_iter, quality=quality, db_task=db_task, dump_unchanged=True, + ) + + get_chunk_path = { + FrameQuality.COMPRESSED: db_data.get_compressed_segment_chunk_path, + FrameQuality.ORIGINAL: db_data.get_original_segment_chunk_path, + }[quality] + + db_segment.refresh_from_db(fields=["chunks_updated_date"]) + if db_segment.chunks_updated_date > initial_chunks_updated_date: + raise CvatChunkTimestampMismatchError( + "Attempting to write an out of date static chunk, " + f"segment.chunks_updated_date: {db_segment.chunks_updated_date}, " + f"expected_ts: {initial_chunks_updated_date}" + ) + with open(get_chunk_path(chunk_id, db_segment_id), 'wb') as f: + f.write(chunk.getvalue()) + class JobValidationLayoutReadSerializer(serializers.Serializer): honeypot_count = serializers.IntegerField(min_value=0, required=False) honeypot_frames = serializers.ListField( @@ -1242,6 +1350,28 @@ def _to_rel_frame(abs_frame: int) -> int: return super().to_representation(data) +class _TaskValidationLayoutBulkUpdateContext: + def __init__( + self, + *, + all_db_frames: dict[int, models.Image], + honeypot_frames: list[int], + all_validation_frames: list[int], + active_validation_frames: list[int], + validation_frame_counts: dict[int, int] | None = None + ): + self.updated_honeypots: dict[int, models.Image] = {} + self.updated_segments: list[int] = [] + self.chunks_to_be_removed: list[dict[str, Any]] = [] + self.context_image_chunks_to_be_removed: list[dict[str, Any]] = [] + self.segments_with_updated_chunks: list[int] = [] + + self.all_db_frames = all_db_frames + self.honeypot_frames = honeypot_frames + self.all_validation_frames = all_validation_frames + self.active_validation_frames = active_validation_frames + self.active_validation_frame_counts = validation_frame_counts + class TaskValidationLayoutWriteSerializer(serializers.Serializer): disabled_frames = serializers.ListField( child=serializers.IntegerField(min_value=0), required=False, @@ -1282,18 +1412,20 @@ def validate(self, attrs): @transaction.atomic def update(self, instance: models.Task, validated_data: dict[str, Any]) -> models.Task: - validation_layout = getattr(instance.data, 'validation_layout', None) - if not validation_layout: + db_validation_layout: models.ValidationLayout | None = ( + getattr(instance.data, 'validation_layout', None) + ) + if not db_validation_layout: raise serializers.ValidationError("Validation is not configured in the task") if 'disabled_frames' in validated_data: requested_disabled_frames = validated_data['disabled_frames'] unknown_requested_disabled_frames = ( - set(requested_disabled_frames).difference(validation_layout.frames) + set(requested_disabled_frames).difference(db_validation_layout.frames) ) if unknown_requested_disabled_frames: raise serializers.ValidationError( - "Unknown frames requested for exclusion from the validation set {}".format( + "Unknown frames requested for exclusion from the validation set: {}".format( format_list(tuple(map(str, sorted(unknown_requested_disabled_frames)))) ) ) @@ -1304,9 +1436,12 @@ def update(self, instance: models.Task, validated_data: dict[str, Any]) -> model gt_job_meta_serializer.is_valid(raise_exception=True) gt_job_meta_serializer.save() + db_validation_layout.refresh_from_db() + instance.data.refresh_from_db() + frame_selection_method = validated_data.get('frame_selection_method') if frame_selection_method and not ( - validation_layout and + db_validation_layout and 
instance.data.validation_layout.mode == models.ValidationMode.GT_POOL ): raise serializers.ValidationError( @@ -1314,52 +1449,190 @@ def update(self, instance: models.Task, validated_data: dict[str, Any]) -> model f"validation mode is '{models.ValidationMode.GT_POOL}'" ) - if frame_selection_method == models.JobFrameSelectionMethod.MANUAL: - requested_honeypot_real_frames = validated_data['honeypot_real_frames'] + if not frame_selection_method: + return instance - task_honeypot_abs_frames = ( - instance.data.images - .filter(is_placeholder=True) - .order_by('frame') - .values_list('frame', flat=True) - ) + # Populate the prefetch cache for required objects + prefetch_related_objects([instance], + Prefetch('data__images', queryset=models.Image.objects.order_by('frame')), + 'segment_set', + 'segment_set__job_set', + ) + + frame_provider = TaskFrameProvider(instance) + db_frames = { + frame_provider.get_rel_frame_number(db_image.frame): db_image + for db_image in instance.data.images.all() + } + honeypot_frames = sorted(f for f, v in db_frames.items() if v.is_placeholder) + all_validation_frames = db_validation_layout.frames + active_validation_frames = db_validation_layout.active_frames + + bulk_context = _TaskValidationLayoutBulkUpdateContext( + all_db_frames=db_frames, + honeypot_frames=honeypot_frames, + all_validation_frames=all_validation_frames, + active_validation_frames=active_validation_frames, + ) - task_honeypot_frames_count = len(task_honeypot_abs_frames) + if frame_selection_method == models.JobFrameSelectionMethod.MANUAL: + requested_honeypot_real_frames = validated_data['honeypot_real_frames'] + task_honeypot_frames_count = len(honeypot_frames) if task_honeypot_frames_count != len(requested_honeypot_real_frames): raise serializers.ValidationError( "Invalid size of 'honeypot_real_frames' array, " f"expected {task_honeypot_frames_count}" ) + elif frame_selection_method == models.JobFrameSelectionMethod.RANDOM_UNIFORM: + # Reset distribution for active validation frames + bulk_context.active_validation_frame_counts = { f: 0 for f in active_validation_frames } + + # Could be done using Django ORM, but using order_by() and filter() + # would result in an extra DB request + db_jobs = sorted( + ( + db_job + for db_segment in instance.segment_set.all() + for db_job in db_segment.job_set.all() + if db_job.type == models.JobType.ANNOTATION + ), + key=lambda j: j.segment.start_frame + ) + for db_job in db_jobs: + job_serializer_params = { + 'frame_selection_method': frame_selection_method + } - if frame_selection_method: - for db_job in ( - models.Job.objects.select_related("segment") - .filter(segment__task_id=instance.id, type=models.JobType.ANNOTATION) - .order_by("segment__start_frame") - .all() - ): - job_serializer_params = { - 'frame_selection_method': frame_selection_method - } + if frame_selection_method == models.JobFrameSelectionMethod.MANUAL: + segment_frame_set = db_job.segment.frame_set + job_serializer_params['honeypot_real_frames'] = [ + requested_frame + for rel_frame, requested_frame in zip( + honeypot_frames, requested_honeypot_real_frames + ) + if frame_provider.get_abs_frame_number(rel_frame) in segment_frame_set + ] - if frame_selection_method == models.JobFrameSelectionMethod.MANUAL: - segment_frame_set = db_job.segment.frame_set - job_serializer_params['honeypot_real_frames'] = [ - requested_frame - for abs_frame, requested_frame in zip( - task_honeypot_abs_frames, requested_honeypot_real_frames - ) - if abs_frame in segment_frame_set - ] + 
job_validation_layout_serializer = JobValidationLayoutWriteSerializer( + db_job, job_serializer_params, bulk_context=bulk_context + ) + job_validation_layout_serializer.is_valid(raise_exception=True) + job_validation_layout_serializer.save() - job_validation_layout_serializer = JobValidationLayoutWriteSerializer( - db_job, job_serializer_params - ) - job_validation_layout_serializer.is_valid(raise_exception=True) - job_validation_layout_serializer.save() + self._perform_bulk_updates(instance, bulk_context=bulk_context) return instance + def _perform_bulk_updates( + self, + db_task: models.Task, + *, + bulk_context: _TaskValidationLayoutBulkUpdateContext, + ): + updated_segments = bulk_context.updated_segments + if not updated_segments: + return + + self._update_frames_in_bulk(db_task, bulk_context=bulk_context) + + # Import it here to avoid circular import + from cvat.apps.engine.cache import MediaCache + media_cache = MediaCache() + media_cache.remove_segments_chunks(bulk_context.chunks_to_be_removed) + media_cache.remove_context_images_chunks(bulk_context.context_image_chunks_to_be_removed) + + # Update segments + updated_date = timezone.now() + for updated_segments_batch in take_by(updated_segments, chunk_size=1000): + models.Job.objects.filter( + segment_id__in=updated_segments_batch + ).update(updated_date=updated_date) + + for updated_segment_chunks_batch in take_by( + bulk_context.segments_with_updated_chunks, chunk_size=1000 + ): + models.Segment.objects.filter( + id__in=updated_segment_chunks_batch + ).update(chunks_updated_date=updated_date) + + # Update parent objects + db_task.touch() + if db_task.project: + db_task.project.touch() + + def _update_frames_in_bulk( + self, + db_task: models.Task, + *, + bulk_context: _TaskValidationLayoutBulkUpdateContext, + ): + self._clear_annotations_on_frames(db_task, bulk_context.updated_honeypots) + + # The django generated bulk_update() query is too slow, so we use bulk_create() instead + # NOTE: Silk doesn't show these queries in the list of queries + # for some reason, but they can be seen in the profile + models.Image.objects.bulk_create( + list(bulk_context.updated_honeypots.values()), + update_conflicts=True, + update_fields=['path', 'width', 'height', 'real_frame'], + unique_fields=[ + # required for Postgres + # https://docs.djangoproject.com/en/4.2/ref/models/querysets/#bulk-create + 'id' + ], + batch_size=1000, + ) + + # Update related images in 2 steps: remove all m2m for honeypots, then add (copy) new ones + # 1. remove + for updated_honeypots_batch in take_by( + bulk_context.updated_honeypots.values(), chunk_size=1000 + ): + models.RelatedFile.images.through.objects.filter( + image_id__in=(db_honeypot.id for db_honeypot in updated_honeypots_batch) + ).delete() + + # 2. batched add (copy): collect all the new records and insert + frame_provider = TaskFrameProvider(db_task) + honeypots_by_validation_frame = grouped( + bulk_context.updated_honeypots, + key=lambda honeypot_frame: frame_provider.get_rel_frame_number( + bulk_context.updated_honeypots[honeypot_frame].real_frame + ) + ) # validation frame -> [honeypot_frame, ...] 
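+        # e.g. honeypots 5 and 7 that now show validation frame 2 and honeypot 9
+        # that shows frame 3 are grouped as {2: [5, 7], 3: [9]}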
+ + new_m2m_objects = [] + m2m_objects_by_validation_image_id = grouped( + models.RelatedFile.images.through.objects + .filter(image_id__in=( + bulk_context.all_db_frames[validation_frame].id + for validation_frame in honeypots_by_validation_frame + )) + .all(), + key=lambda m2m_obj: m2m_obj.image_id + ) + for validation_frame, validation_frame_honeypots in honeypots_by_validation_frame.items(): + validation_frame_m2m_objects = m2m_objects_by_validation_image_id.get( + bulk_context.all_db_frames[validation_frame].id + ) + if not validation_frame_m2m_objects: + continue + + # Copy validation frame m2m objects to corresponding honeypots + for honeypot_frame in validation_frame_honeypots: + new_m2m_objects.extend( + models.RelatedFile.images.through( + image_id=bulk_context.all_db_frames[honeypot_frame].id, + relatedfile_id=m2m_obj.relatedfile_id + ) + for m2m_obj in validation_frame_m2m_objects + ) + + models.RelatedFile.images.through.objects.bulk_create(new_m2m_objects, batch_size=1000) + + def _clear_annotations_on_frames(self, db_task: models.Task, frames: Sequence[int]): + models.clear_annotations_on_frames_in_honeypot_task(db_task, frames=frames) + class TaskValidationLayoutReadSerializer(serializers.ModelSerializer): validation_frames = serializers.ListField( child=serializers.IntegerField(min_value=0), source='frames', required=False, @@ -2999,7 +3272,7 @@ class Meta: def _update_related_storages( instance: Union[models.Project, models.Task], - validated_data: Dict[str, Any], + validated_data: dict[str, Any], ) -> None: for storage_type in ('source_storage', 'target_storage'): new_conf = validated_data.pop(storage_type, None) @@ -3054,7 +3327,7 @@ def _update_related_storages( storage_instance.cloud_storage_id = new_cloud_storage_id storage_instance.save() -def _configure_related_storages(validated_data: Dict[str, Any]) -> Dict[str, Optional[models.Storage]]: +def _configure_related_storages(validated_data: dict[str, Any]) -> dict[str, Optional[models.Storage]]: storages = { 'source_storage': None, 'target_storage': None, @@ -3147,7 +3420,7 @@ class RequestDataOperationSerializer(serializers.Serializer): format = serializers.CharField(required=False, allow_null=True) function_id = serializers.CharField(required=False, allow_null=True) - def to_representation(self, rq_job: RQJob) -> Dict[str, Any]: + def to_representation(self, rq_job: RQJob) -> dict[str, Any]: parsed_rq_id: RQId = rq_job.parsed_rq_id return { @@ -3188,7 +3461,7 @@ class RequestSerializer(serializers.Serializer): result_id = serializers.IntegerField(required=False, allow_null=True) @extend_schema_field(UserIdentifiersSerializer()) - def get_owner(self, rq_job: RQJob) -> Dict[str, Any]: + def get_owner(self, rq_job: RQJob) -> dict[str, Any]: return UserIdentifiersSerializer(rq_job.meta[RQJobMetaField.USER]).data @extend_schema_field( @@ -3228,10 +3501,11 @@ def get_message(self, rq_job: RQJob) -> str: return message - def to_representation(self, rq_job: RQJob) -> Dict[str, Any]: + def to_representation(self, rq_job: RQJob) -> dict[str, Any]: representation = super().to_representation(rq_job) - if representation["status"] == RQJobStatus.DEFERRED: + # FUTURE-TODO: support such statuses on UI + if representation["status"] in (RQJobStatus.DEFERRED, RQJobStatus.SCHEDULED): representation["status"] = RQJobStatus.QUEUED if representation["status"] == RQJobStatus.FINISHED: diff --git a/cvat/apps/engine/signals.py b/cvat/apps/engine/signals.py index 297baec9488f..3a964d90c2cc 100644 --- a/cvat/apps/engine/signals.py +++ 
b/cvat/apps/engine/signals.py @@ -1,13 +1,14 @@ # Copyright (C) 2019-2022 Intel Corporation -# Copyright (C) 2023 CVAT.ai Corporation +# Copyright (C) 2023-2024 CVAT.ai Corporation # # SPDX-License-Identifier: MIT import functools import shutil +from django.conf import settings from django.contrib.auth.models import User from django.db import transaction -from django.db.models.signals import post_delete, post_save +from django.db.models.signals import m2m_changed, post_delete, post_save from django.dispatch import receiver from .models import CloudStorage, Data, Job, Profile, Project, StatusChoice, Task, Asset @@ -36,13 +37,28 @@ def __save_job_handler(instance, created, **kwargs): db_task.status = status db_task.save(update_fields=["status", "updated_date"]) -@receiver(post_save, sender=User, - dispatch_uid=__name__ + ".save_user_handler") -def __save_user_handler(instance, **kwargs): + +@receiver(post_save, sender=User, dispatch_uid=__name__ + ".save_user_handler") +def __save_user_handler(instance: User, **kwargs): + should_access_analytics = instance.is_superuser or instance.groups.filter(name=settings.IAM_ADMIN_ROLE).exists() if not hasattr(instance, 'profile'): profile = Profile() profile.user = instance + profile.has_analytics_access = should_access_analytics profile.save() + elif should_access_analytics and not instance.profile.has_analytics_access: + instance.profile.has_analytics_access = True + instance.profile.save() + + +@receiver(m2m_changed, sender=User.groups.through, dispatch_uid=__name__ + ".m2m_user_groups_change_handler") +def __m2m_user_groups_change_handler(sender, instance: User, action: str, **kwargs): + if action == 'post_add': + is_admin = instance.groups.filter(name=settings.IAM_ADMIN_ROLE).exists() + if is_admin and hasattr(instance, 'profile') and not instance.profile.has_analytics_access: + instance.profile.has_analytics_access = True + instance.profile.save() + @receiver(post_delete, sender=Project, dispatch_uid=__name__ + ".delete_project_handler") diff --git a/cvat/apps/engine/task.py b/cvat/apps/engine/task.py index 446cdabb082c..0f36674299fc 100644 --- a/cvat/apps/engine/task.py +++ b/cvat/apps/engine/task.py @@ -10,18 +10,18 @@ import re import rq import shutil +from collections.abc import Iterator, Sequence from copy import deepcopy from contextlib import closing from datetime import datetime, timezone from pathlib import Path -from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Sequence, Tuple, Union +from typing import Any, NamedTuple, Optional, Union from urllib import parse as urlparse from urllib import request as urlrequest import av import attrs import django_rq -from datumaro.util import take_by from django.conf import settings from django.db import transaction from django.forms.models import model_to_dict @@ -30,17 +30,20 @@ from cvat.apps.engine import models from cvat.apps.engine.log import ServerLogManager +from cvat.apps.engine.frame_provider import TaskFrameProvider from cvat.apps.engine.media_extractors import ( MEDIA_TYPES, CachingMediaIterator, IMediaReader, ImageListReader, Mpeg4ChunkWriter, Mpeg4CompressedChunkWriter, RandomAccessIterator, - ValidateDimension, ZipChunkWriter, ZipCompressedChunkWriter, get_mime, sort + ValidateDimension, ZipChunkWriter, ZipCompressedChunkWriter, get_mime, sort, + load_image, ) from cvat.apps.engine.models import RequestAction, RequestTarget from cvat.apps.engine.utils import ( - av_scan_paths, format_list,get_rq_job_meta, - define_dependent_job, get_rq_lock_by_user, preload_images + 
av_scan_paths, format_list, get_rq_job_meta, + define_dependent_job, get_rq_lock_by_user, take_by ) from cvat.apps.engine.rq_job_handler import RQId +from cvat.apps.engine.task_validation import HoneypotFrameSelector from cvat.utils.http import make_requests_session, PROXIES_FOR_UNTRUSTED_URLS from utils.dataset_manifest import ImageManifestManager, VideoManifestManager, is_manifest from utils.dataset_manifest.core import VideoManifestValidator, is_dataset_manifest @@ -75,7 +78,7 @@ def create( ############################# Internal implementation for server API -JobFileMapping = List[List[str]] +JobFileMapping = list[list[str]] class SegmentParams(NamedTuple): start_frame: int @@ -89,10 +92,10 @@ class SegmentsParams(NamedTuple): overlap: int def _copy_data_from_share_point( - server_files: List[str], + server_files: list[str], upload_dir: str, server_dir: Optional[str] = None, - server_files_exclude: Optional[List[str]] = None, + server_files_exclude: Optional[list[str]] = None, ): job = rq.get_current_job() job.meta['status'] = 'Data are being copied from source..' @@ -302,7 +305,7 @@ def _validate_data(counter, manifest_files=None): return counter, task_modes[0] def _validate_job_file_mapping( - db_task: models.Task, data: Dict[str, Any] + db_task: models.Task, data: dict[str, Any] ) -> Optional[JobFileMapping]: job_file_mapping = data.get('job_file_mapping', None) @@ -341,7 +344,7 @@ def _validate_job_file_mapping( return job_file_mapping def _validate_validation_params( - db_task: models.Task, data: Dict[str, Any], *, is_backup_restore: bool = False + db_task: models.Task, data: dict[str, Any], *, is_backup_restore: bool = False ) -> Optional[dict[str, Any]]: params = data.get('validation_params', {}) if not params: @@ -380,7 +383,7 @@ def _validate_validation_params( return params def _validate_manifest( - manifests: List[str], + manifests: list[str], root_dir: Optional[str], *, is_in_cloud: bool, @@ -453,7 +456,7 @@ def _download_data(urls, upload_dir): def _download_data_from_cloud_storage( db_storage: models.CloudStorage, - files: List[str], + files: list[str], upload_dir: str, ): cloud_storage_instance = db_storage_to_storage_instance(db_storage) @@ -477,7 +480,7 @@ def _read_dataset_manifest(path: str, *, create_index: bool = False) -> ImageMan def _restore_file_order_from_manifest( extractor: ImageListReader, manifest: ImageManifestManager, upload_dir: str -) -> List[str]: +) -> list[str]: """ Restores file ordering for the "predefined" file sorting method of the task creation. Checks for extra files in the input. 
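A minimal sketch of the "predefined" ordering contract this docstring describes, with assumed names (restore_predefined_order and its arguments are illustrative, not helpers from this patch):

def restore_predefined_order(uploaded: list[str], manifest_files: list[str]) -> list[str]:
    # Reorder uploaded files to match the manifest order; reject extra
    # input files that the manifest does not describe.
    input_files = {fn: fn for fn in uploaded}  # name -> stored file
    extra = sorted(set(input_files).difference(manifest_files))
    if extra:
        raise ValueError(f"Input files not described in the manifest: {extra}")
    return [input_files[fn] for fn in manifest_files]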
@@ -509,7 +512,7 @@ def _restore_file_order_from_manifest( return [input_files[fn] for fn in manifest_files] def _create_task_manifest_based_on_cloud_storage_manifest( - sorted_media: List[str], + sorted_media: list[str], cloud_storage_manifest_prefix: str, cloud_storage_manifest: ImageManifestManager, manifest: ImageManifestManager, @@ -534,7 +537,7 @@ def _add_prefix(properties): def _create_task_manifest_from_cloud_data( db_storage: models.CloudStorage, - sorted_media: List[str], + sorted_media: list[str], manifest: ImageManifestManager, dimension: models.DimensionType = models.DimensionType.DIM_2D, *, @@ -555,7 +558,7 @@ def _create_task_manifest_from_cloud_data( @transaction.atomic def _create_thread( db_task: Union[int, models.Task], - data: Dict[str, Any], + data: dict[str, Any], *, is_backup_restore: bool = False, is_dataset_import: bool = False, @@ -1246,9 +1249,16 @@ def _update_status(msg: str) -> None: frames_per_job_count = min(len(pool_frames), frames_per_job_count) - non_pool_frames = list(set(all_frames).difference(pool_frames)) + non_pool_frames = sorted( + # set() doesn't guarantee ordering, + # so sort additionally before shuffling to make results reproducible + set(all_frames).difference(pool_frames) + ) rng.shuffle(non_pool_frames) + validation_frame_counts = {f: 0 for f in pool_frames} + frame_selector = HoneypotFrameSelector(validation_frame_counts, rng=rng) + # Don't use the same rng as for frame ordering to simplify random_seed maintenance in future # We still use the same seed, but in this case the frame selection rng is separate # from job frame ordering rng @@ -1259,11 +1269,9 @@ def _update_status(msg: str) -> None: new_db_images: list[models.Image] = [] validation_frames: list[int] = [] frame_idx_map: dict[int, int] = {} # new to original id - for job_frames in take_by(non_pool_frames, count=db_task.segment_size or db_data.size): - job_validation_frames = rng.choice( - pool_frames, size=frames_per_job_count, replace=False - ) - job_frames += job_validation_frames.tolist() + for job_frames in take_by(non_pool_frames, chunk_size=db_task.segment_size or db_data.size): + job_validation_frames = list(frame_selector.select_next_frames(frames_per_job_count)) + job_frames += job_validation_frames job_frame_ordering_rng.shuffle(job_frames) @@ -1499,6 +1507,9 @@ def _to_abs_frame(rel_frame: int) -> int: ): _create_static_chunks(db_task, media_extractor=extractor, upload_dir=upload_dir) + # Prepare the preview image and save it in the cache + TaskFrameProvider(db_task=db_task).get_preview() + def _create_static_chunks(db_task: models.Task, *, media_extractor: IMediaReader, upload_dir: str): @attrs.define class _ChunkProgressUpdater: @@ -1537,7 +1548,7 @@ def save_chunks( MEDIA_TYPES['archive']['extractor'], )) ): - chunk_data = preload_images(chunk_data) + chunk_data = list(map(load_image, chunk_data)) # TODO: extract into a class @@ -1588,7 +1599,7 @@ def save_chunks( frame_map = {} # frame number -> extractor frame number if isinstance(media_extractor, MEDIA_TYPES['video']['extractor']): - def _get_frame_size(frame_tuple: Tuple[av.VideoFrame, Any, Any]) -> int: + def _get_frame_size(frame_tuple: tuple[av.VideoFrame, Any, Any]) -> int: # There is no need to be absolutely precise here, # just need to provide the reasonable upper boundary. 
# Return bytes needed for 1 frame diff --git a/cvat/apps/engine/task_validation.py b/cvat/apps/engine/task_validation.py new file mode 100644 index 000000000000..fe76b4e99408 --- /dev/null +++ b/cvat/apps/engine/task_validation.py @@ -0,0 +1,48 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +from collections.abc import Mapping, Sequence +from typing import Generic, TypeVar + +import numpy as np + +_T = TypeVar("_T") + + +class HoneypotFrameSelector(Generic[_T]): + def __init__( + self, validation_frame_counts: Mapping[_T, int], *, rng: np.random.Generator | None = None + ): + self.validation_frame_counts = validation_frame_counts + + if not rng: + rng = np.random.default_rng() + + self.rng = rng + + def select_next_frames(self, count: int) -> Sequence[_T]: + # This approach guarantees that: + # - every GT frame is used + # - GT frames are used uniformly (at most min count + 1) + # - GT frames are not repeated in jobs + # - honeypot sets are different in jobs + # - honeypot sets are random + # if possible (if the job and GT counts allow this). + pick = [] + + for random_number in self.rng.random(count): + least_count = min(c for f, c in self.validation_frame_counts.items() if f not in pick) + least_used_frames = tuple( + f + for f, c in self.validation_frame_counts.items() + if f not in pick + if c == least_count + ) + + selected_item = int(random_number * len(least_used_frames)) + selected_frame = least_used_frames[selected_item] + pick.append(selected_frame) + self.validation_frame_counts[selected_frame] += 1 + + return pick diff --git a/cvat/apps/engine/tests/test_rest_api.py b/cvat/apps/engine/tests/test_rest_api.py index 8331856a5fad..e6ed6b6c0303 100644 --- a/cvat/apps/engine/tests/test_rest_api.py +++ b/cvat/apps/engine/tests/test_rest_api.py @@ -37,7 +37,8 @@ from rest_framework import status from rest_framework.test import APIClient -from datumaro.util.test_utils import current_function_name, TestDir +from cvat.apps.dataset_manager.tests.utils import TestDir +from cvat.apps.dataset_manager.util import current_function_name from cvat.apps.engine.models import (AttributeSpec, AttributeType, Data, Job, Project, Segment, StageChoice, StatusChoice, Task, Label, StorageMethodChoice, StorageChoice, DimensionType, SortingMethod) @@ -53,7 +54,6 @@ def create_db_users(cls): (group_admin, _) = Group.objects.get_or_create(name="admin") - (group_business, _) = Group.objects.get_or_create(name="business") (group_user, _) = Group.objects.get_or_create(name="user") (group_annotator, _) = Group.objects.get_or_create(name="worker") (group_somebody, _) = Group.objects.get_or_create(name="somebody") @@ -62,7 +62,7 @@ def create_db_users(cls): password="admin") user_admin.groups.add(group_admin) user_owner = User.objects.create_user(username="user1", password="user1") - user_owner.groups.add(group_business) + user_owner.groups.add(group_user) user_assignee = User.objects.create_user(username="user2", password="user2") user_assignee.groups.add(group_annotator) user_annotator = User.objects.create_user(username="user3", password="user3") @@ -637,6 +637,8 @@ def _check_data(self, user, data, is_full): extra_check("is_active", data) extra_check("last_login", data) extra_check("date_joined", data) + extra_check("has_analytics_access", data) + class UserListAPITestCase(UserAPITestCase): def _run_api_v2_users(self, user): @@ -671,6 +673,7 @@ def test_api_v2_users_no_auth(self): response = self._run_api_v2_users(None) self.assertEqual(response.status_code, 
status.HTTP_401_UNAUTHORIZED) + class UserSelfAPITestCase(UserAPITestCase): def _run_api_v2_users_self(self, user): with ForceLogin(user, self.client): @@ -698,6 +701,7 @@ def test_api_v2_users_self_no_auth(self): response = self._run_api_v2_users_self(None) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) + class UserGetAPITestCase(UserAPITestCase): def _run_api_v2_users_id(self, user, user_id): with ForceLogin(user, self.client): @@ -740,6 +744,7 @@ def test_api_v2_users_id_no_auth(self): response = self._run_api_v2_users_id(None, self.user.id) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) + class UserPartialUpdateAPITestCase(UserAPITestCase): def _run_api_v2_users_id(self, user, user_id, data): with ForceLogin(user, self.client): @@ -786,6 +791,7 @@ def test_api_v2_users_id_no_auth_partial(self): response = self._run_api_v2_users_id(None, self.user.id, data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) + class UserDeleteAPITestCase(UserAPITestCase): def _run_api_v2_users_id(self, user, user_id): with ForceLogin(user, self.client): diff --git a/cvat/apps/engine/tests/test_rest_api_3D.py b/cvat/apps/engine/tests/test_rest_api_3D.py index 9f000be5d218..67791c3c113c 100644 --- a/cvat/apps/engine/tests/test_rest_api_3D.py +++ b/cvat/apps/engine/tests/test_rest_api_3D.py @@ -20,9 +20,9 @@ from django.contrib.auth.models import Group, User from rest_framework import status +from cvat.apps.dataset_manager.tests.utils import TestDir from cvat.apps.engine.media_extractors import ValidateDimension from cvat.apps.dataset_manager.task import TaskAnnotation -from datumaro.util.test_utils import TestDir from cvat.apps.engine.tests.utils import get_paginated_collection, ApiTestBase, ForceLogin diff --git a/cvat/apps/engine/tests/utils.py b/cvat/apps/engine/tests/utils.py index 3d2a533d1e97..910323cac1f7 100644 --- a/cvat/apps/engine/tests/utils.py +++ b/cvat/apps/engine/tests/utils.py @@ -2,9 +2,10 @@ # # SPDX-License-Identifier: MIT +from collections.abc import Iterator, Sequence from contextlib import contextmanager from io import BytesIO -from typing import Any, Callable, Dict, Iterator, Sequence, TypeVar +from typing import Any, Callable, TypeVar import itertools import logging import os @@ -178,6 +179,6 @@ def get_paginated_collection( def filter_dict( - d: Dict[str, Any], *, keep: Sequence[str] = None, drop: Sequence[str] = None -) -> Dict[str, Any]: + d: dict[str, Any], *, keep: Sequence[str] = None, drop: Sequence[str] = None +) -> dict[str, Any]: return {k: v for k, v in d.items() if (not keep or k in keep) and (not drop or k not in drop)} diff --git a/cvat/apps/engine/utils.py b/cvat/apps/engine/utils.py index 256094837598..dd4533538f5a 100644 --- a/cvat/apps/engine/utils.py +++ b/cvat/apps/engine/utils.py @@ -4,14 +4,16 @@ # SPDX-License-Identifier: MIT import ast +from itertools import islice import cv2 as cv from collections import namedtuple +from collections.abc import Generator, Iterable, Iterator, Mapping, Sequence import hashlib import importlib import sys import traceback from contextlib import suppress, nullcontext -from typing import Any, Dict, Optional, Callable, Sequence, Union, Iterable +from typing import Any, Callable, Optional, TypeVar, Union import subprocess import os import urllib.parse @@ -97,6 +99,9 @@ def execute_python_code(source_code, global_vars=None, local_vars=None): line_number = traceback.extract_tb(tb)[-1][1] raise InterpreterError("{} at line {}: {}".format(error_class, line_number, details)) 
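+# Raised when a static chunk write races with a newer update of the same
+# segment (its chunks_updated_date has moved forward); API handlers convert
+# this into an HTTP 429 response with a Retry-After header.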
+class CvatChunkTimestampMismatchError(Exception): + pass + def av_scan_paths(*paths): if 'yes' == os.environ.get('CLAM_AV'): command = ['clamscan', '--no-summary', '-i', '-o'] @@ -198,14 +203,25 @@ def define_dependent_job( return Dependency(jobs=[sorted(user_jobs, key=lambda job: job.created_at)[-1]], allow_failure=True) if user_jobs else None -def get_rq_lock_by_user(queue: DjangoRQ, user_id: int) -> Union[Lock, nullcontext]: +def get_rq_lock_by_user(queue: DjangoRQ, user_id: int, *, timeout: Optional[int] = 30, blocking_timeout: Optional[int] = None) -> Union[Lock, nullcontext]: if settings.ONE_RUNNING_JOB_IN_QUEUE_PER_USER: - return queue.connection.lock(f'{queue.name}-lock-{user_id}', timeout=30) + return queue.connection.lock( + name=f'{queue.name}-lock-{user_id}', + timeout=timeout, + blocking_timeout=blocking_timeout, + ) return nullcontext() -def get_rq_lock_for_job(queue: DjangoRQ, rq_id: str) -> Lock: +def get_rq_lock_for_job(queue: DjangoRQ, rq_id: str, *, timeout: int = 60, blocking_timeout: int = 50) -> Lock: # lock timeout corresponds to the nginx request timeout (proxy_read_timeout) - return queue.connection.lock(f'lock-for-job-{rq_id}'.lower(), timeout=60) + + assert timeout is not None + assert blocking_timeout is not None + return queue.connection.lock( + name=f'lock-for-job-{rq_id}'.lower(), + timeout=timeout, + blocking_timeout=blocking_timeout, + ) def get_rq_job_meta( request: HttpRequest, @@ -247,7 +263,7 @@ def get_rq_job_meta( return meta def reverse(viewname, *, args=None, kwargs=None, - query_params: Optional[Dict[str, str]] = None, + query_params: Optional[dict[str, str]] = None, request: Optional[HttpRequest] = None, ) -> str: """ @@ -266,7 +282,7 @@ def reverse(viewname, *, args=None, kwargs=None, def get_server_url(request: HttpRequest) -> str: return request.build_absolute_uri('/') -def build_field_filter_params(field: str, value: Any) -> Dict[str, str]: +def build_field_filter_params(field: str, value: Any) -> dict[str, str]: """ Builds a collection filter query params for a single field and value. """ @@ -363,13 +379,6 @@ def sendfile( return _sendfile(request, filename, attachment, attachment_filename, mimetype, encoding) -def preload_image(image: tuple[str, str, str])-> tuple[Image.Image, str, str]: - pil_img = Image.open(image[0]) - pil_img.load() - return pil_img, image[1], image[2] - -def preload_images(images: Iterable[tuple[str, str, str]]) -> list[tuple[Image.Image, str, str]]: - return list(map(preload_image, images)) def build_backup_file_name( *, @@ -417,10 +426,22 @@ def directory_tree(path, max_depth=None) -> str: def is_dataset_export(request: HttpRequest) -> bool: return to_bool(request.query_params.get('save_images', False)) +_T = TypeVar('_T') -def chunked_list(lst, chunk_size): - for i in range(0, len(lst), chunk_size): - yield lst[i:i + chunk_size] +def take_by(iterable: Iterable[_T], chunk_size: int) -> Generator[list[_T], None, None]: + """ + Returns elements from the input iterable by batches of N items. 
+ ('abcdefg', 3) -> ['a', 'b', 'c'], ['d', 'e', 'f'], ['g'] + """ + # can be changed to itertools.batched after migration to python3.12 + + it = iter(iterable) + while True: + batch = list(islice(it, chunk_size)) + if len(batch) == 0: + break + + yield batch FORMATTED_LIST_DISPLAY_THRESHOLD = 10 @@ -439,3 +460,34 @@ def format_list( separator.join(items[:max_items]), f" (and {remainder_count} more)" if 0 < remainder_count else "", ) + + +_K = TypeVar("_K") +_V = TypeVar("_V") + + +def grouped( + items: Iterator[_V] | Iterable[_V], *, key: Callable[[_V], _K] +) -> Mapping[_K, Sequence[_V]]: + """ + Returns a mapping with input iterable elements grouped by key, for example: + + grouped( + [("apple1", "red"), ("apple2", "green"), ("apple3", "red")], + key=lambda v: v[1] + ) + -> + { + "red": [("apple1", "red"), ("apple3", "red")], + "green": [("apple2", "green")] + } + + Similar to itertools.groupby, but allows reiteration on resulting groups. + """ + + # Can be implemented with itertools.groupby, but it requires extra sorting for input elements + grouped_items = {} + for item in items: + grouped_items.setdefault(key(item), []).append(item) + + return grouped_items diff --git a/cvat/apps/engine/view_utils.py b/cvat/apps/engine/view_utils.py index 2acb8bac780f..6f5dc298a7b6 100644 --- a/cvat/apps/engine/view_utils.py +++ b/cvat/apps/engine/view_utils.py @@ -4,7 +4,7 @@ # NOTE: importing in the utils.py header leads to circular importing -from typing import Optional, Type +from typing import Optional from django.db.models.query import QuerySet from django.http.request import HttpRequest @@ -23,9 +23,9 @@ def make_paginated_response( queryset: QuerySet, *, viewset: GenericViewSet, - response_type: Optional[Type[HttpResponse]] = None, - serializer_type: Optional[Type[Serializer]] = None, - request: Optional[Type[HttpRequest]] = None, + response_type: Optional[type[HttpResponse]] = None, + serializer_type: Optional[type[Serializer]] = None, + request: Optional[type[HttpRequest]] = None, **serializer_params ): # Adapted from the mixins.ListModelMixin.list() @@ -54,7 +54,7 @@ def make_paginated_response( return response_type(serializer.data) -def list_action(serializer_class: Type[Serializer], **kwargs): +def list_action(serializer_class: type[Serializer], **kwargs): params = dict( detail=True, methods=["GET"], diff --git a/cvat/apps/engine/views.py b/cvat/apps/engine/views.py index 0dd84acab90f..eb39f6732c18 100644 --- a/cvat/apps/engine/views.py +++ b/cvat/apps/engine/views.py @@ -16,8 +16,9 @@ from contextlib import suppress from PIL import Image from types import SimpleNamespace -from typing import Optional, Any, Dict, List, Union, cast, Callable, Mapping, Iterable +from typing import Optional, Any, Union, cast, Callable from collections import namedtuple +from collections.abc import Mapping, Iterable from copy import copy from datetime import datetime from redis.exceptions import ConnectionError as RedisConnectionError @@ -106,7 +107,7 @@ from .log import ServerLogManager from cvat.apps.iam.filters import ORGANIZATION_OPEN_API_PARAMETERS from cvat.apps.iam.permissions import PolicyEnforcer, IsAuthenticatedOrReadPublicResource -from cvat.apps.engine.cache import MediaCache +from cvat.apps.engine.cache import MediaCache, CvatChunkTimestampMismatchError, LockError from cvat.apps.engine.permissions import (CloudStoragePermission, CommentPermission, IssuePermission, JobPermission, LabelPermission, ProjectPermission, TaskPermission, UserPermission) @@ -118,6 +119,7 @@ _DATA_CHECKSUM_HEADER_NAME = 
'X-Checksum' _DATA_UPDATED_DATE_HEADER_NAME = 'X-Updated-Date' +_RETRY_AFTER_TIMEOUT = 10 @extend_schema(tags=['server']) class ServerViewSet(viewsets.ViewSet): @@ -226,7 +228,7 @@ def annotation_formats(request): def plugins(request): data = { 'GIT_INTEGRATION': False, # kept for backwards compatibility - 'ANALYTICS': to_bool(os.environ.get("CVAT_ANALYTICS", False)), + 'ANALYTICS': settings.ANALYTICS_ENABLED, 'MODELS': to_bool(os.environ.get("CVAT_SERVERLESS", False)), 'PREDICT': False, # FIXME: it is unused anymore (for UI only) } @@ -723,6 +725,11 @@ def __call__(self): msg = str(ex) if not isinstance(ex, ValidationError) else \ '\n'.join([str(d) for d in ex.detail]) return Response(data=msg, status=ex.status_code) + except (TimeoutError, CvatChunkTimestampMismatchError, LockError): + return Response( + status=status.HTTP_429_TOO_MANY_REQUESTS, + headers={'Retry-After': _RETRY_AFTER_TIMEOUT}, + ) @abstractmethod def _get_chunk_response_headers(self, chunk_data: DataWithMeta) -> dict[str, str]: ... @@ -806,20 +813,26 @@ def __call__(self): # Reproduce the task chunk indexing frame_provider = self._get_frame_provider() - if self.index is not None: - data = frame_provider.get_chunk( - self.index, quality=self.quality, is_task_chunk=False + try: + if self.index is not None: + data = frame_provider.get_chunk( + self.index, quality=self.quality, is_task_chunk=False + ) + else: + data = frame_provider.get_chunk( + self.number, quality=self.quality, is_task_chunk=True + ) + + return HttpResponse( + data.data.getvalue(), + content_type=data.mime, + headers=self._get_chunk_response_headers(data), ) - else: - data = frame_provider.get_chunk( - self.number, quality=self.quality, is_task_chunk=True + except (TimeoutError, CvatChunkTimestampMismatchError, LockError): + return Response( + status=status.HTTP_429_TOO_MANY_REQUESTS, + headers={'Retry-After': _RETRY_AFTER_TIMEOUT}, ) - - return HttpResponse( - data.data.getvalue(), - content_type=data.mime, - headers=self._get_chunk_response_headers(data), - ) else: return super().__call__() @@ -1064,7 +1077,7 @@ def _maybe_append_upload_info_entry(self, filename: str): filename = self._prepare_upload_info_entry(filename) task_data.client_files.get_or_create(file=filename) - def _append_upload_info_entries(self, client_files: List[Dict[str, Any]]): + def _append_upload_info_entries(self, client_files: list[dict[str, Any]]): # batch version of _maybe_append_upload_info_entry() without optional insertion task_data = cast(Data, self._object.data) task_data.client_files.bulk_create([ @@ -1072,7 +1085,7 @@ def _append_upload_info_entries(self, client_files: List[Dict[str, Any]]): for cf in client_files ]) - def _sort_uploaded_files(self, uploaded_files: List[str], ordering: List[str]) -> List[str]: + def _sort_uploaded_files(self, uploaded_files: list[str], ordering: list[str]) -> list[str]: """ Applies file ordering for the "predefined" file sorting method of the task creation. 
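The chunk and preview endpoints above now answer HTTP 429 with a Retry-After header while a chunk is locked or out of date (TimeoutError, CvatChunkTimestampMismatchError, LockError). A minimal client-side sketch of the expected backoff, assuming the requests library (fetch_chunk is hypothetical, not part of this patch):

import time

import requests

def fetch_chunk(url: str, *, max_attempts: int = 5) -> bytes:
    # Retry on 429, sleeping for the server-provided Retry-After interval
    # (10 seconds by default in this patch)
    for _ in range(max_attempts):
        response = requests.get(url)
        if response.status_code != 429:
            response.raise_for_status()
            return response.content
        time.sleep(int(response.headers.get("Retry-After", "10")))
    raise TimeoutError(f"chunk at {url} is still being prepared")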
@@ -1760,7 +1773,7 @@ def preview(self, request, pk): @action(detail=True, methods=["GET", "PATCH"], url_path='validation_layout') @transaction.atomic def validation_layout(self, request, pk): - db_task = self.get_object() # call check_object_permissions as well + db_task = cast(models.Task, self.get_object()) # call check_object_permissions as well validation_layout = getattr(db_task.data, 'validation_layout', None) @@ -2968,6 +2981,11 @@ def preview(self, request, pk): '\n'.join([str(d) for d in ex.detail]) slogger.cloud_storage[pk].info(msg) return Response(data=msg, status=ex.status_code) + except (TimeoutError, CvatChunkTimestampMismatchError, LockError): + return Response( + status=status.HTTP_429_TOO_MANY_REQUESTS, + headers={'Retry-After': _RETRY_AFTER_TIMEOUT}, + ) except Exception as ex: slogger.glob.error(str(ex)) return Response("An internal error has occurred", @@ -3254,6 +3272,9 @@ def perform_destroy(self, instance): def rq_exception_handler(rq_job, exc_type, exc_value, tb): rq_job.meta[RQJobMetaField.FORMATTED_EXCEPTION] = "".join( traceback.format_exception_only(exc_type, exc_value)) + if rq_job.origin == settings.CVAT_QUEUES.CHUNKS.value: + rq_job.meta[RQJobMetaField.EXCEPTION_TYPE] = exc_type + rq_job.meta[RQJobMetaField.EXCEPTION_ARGS] = exc_value.args rq_job.save_meta() return True @@ -3548,7 +3569,7 @@ def get_queryset(self): def queues(self) -> Iterable[DjangoRQ]: return (django_rq.get_queue(queue_name) for queue_name in self.SUPPORTED_QUEUES) - def _get_rq_jobs_from_queue(self, queue: DjangoRQ, user_id: int) -> List[RQJob]: + def _get_rq_jobs_from_queue(self, queue: DjangoRQ, user_id: int) -> list[RQJob]: job_ids = set(queue.get_job_ids() + queue.started_job_registry.get_job_ids() + queue.finished_job_registry.get_job_ids() + @@ -3568,7 +3589,7 @@ def _get_rq_jobs_from_queue(self, queue: DjangoRQ, user_id: int) -> List[RQJob]: return jobs - def _get_rq_jobs(self, user_id: int) -> List[RQJob]: + def _get_rq_jobs(self, user_id: int) -> list[RQJob]: """ Get all RQ jobs for a specific user and return them as a list of RQJob objects. 
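Before the events app changes, a usage sketch for the HoneypotFrameSelector added in cvat/apps/engine/task_validation.py; the five-frame validation pool and the seed are assumptions for illustration:

import numpy as np

from cvat.apps.engine.task_validation import HoneypotFrameSelector

# Assumed GT pool: validation frames 10..14, none used in any job yet
counts = {frame: 0 for frame in range(10, 15)}
selector = HoneypotFrameSelector(counts, rng=np.random.default_rng(42))

# Three annotation jobs with two honeypots each: no frame repeats inside
# a job, and pool usage counts stay within 1 of each other across jobs
for _ in range(3):
    print(selector.select_next_frames(2))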
diff --git a/cvat/apps/events/event.py b/cvat/apps/events/event.py index ae519b568644..a4afff968549 100644 --- a/cvat/apps/events/event.py +++ b/cvat/apps/events/event.py @@ -20,6 +20,8 @@ class EventScopes: "task": ["create", "update", "delete"], "job": ["create", "update", "delete"], "organization": ["create", "update", "delete"], + "membership": ["create", "update", "delete"], + "invitation": ["create", "delete"], "user": ["create", "update", "delete"], "cloudstorage": ["create", "update", "delete"], "issue": ["create", "update", "delete"], @@ -28,6 +30,7 @@ class EventScopes: "label": ["create", "update", "delete"], "dataset": ["export", "import"], "function": ["call"], + "webhook": ["create", "update", "delete"], } @classmethod diff --git a/cvat/apps/events/handlers.py b/cvat/apps/events/handlers.py index f2d3f7577617..8f29f91d9a1a 100644 --- a/cvat/apps/events/handlers.py +++ b/cvat/apps/events/handlers.py @@ -4,7 +4,7 @@ import datetime import traceback -from typing import Optional, Union +from typing import Any, Optional, Union import rq from crum import get_current_request, get_current_user @@ -26,6 +26,8 @@ MembershipReadSerializer, OrganizationReadSerializer) from cvat.apps.engine.rq_job_handler import RQJobMetaField +from cvat.apps.webhooks.models import Webhook +from cvat.apps.webhooks.serializers import WebhookReadSerializer from .cache import get_cache from .event import event_scope, record_server_event @@ -66,6 +68,7 @@ def task_id(instance): except Exception: return None + def job_id(instance): if isinstance(instance, Job): return instance.id @@ -78,6 +81,7 @@ def job_id(instance): except Exception: return None + def get_user(instance=None): # Try to get current user from request user = get_current_user() @@ -97,6 +101,7 @@ def get_user(instance=None): return None + def get_request(instance=None): request = get_current_request() if request is not None: @@ -111,6 +116,7 @@ def get_request(instance=None): return None + def _get_value(obj, key): if obj is not None: if isinstance(obj, dict): @@ -119,22 +125,27 @@ def _get_value(obj, key): return None + def request_id(instance=None): request = get_request(instance) return _get_value(request, "uuid") + def user_id(instance=None): current_user = get_user(instance) return _get_value(current_user, "id") + def user_name(instance=None): current_user = get_user(instance) return _get_value(current_user, "username") + def user_email(instance=None): current_user = get_user(instance) return _get_value(current_user, "email") or None + def organization_slug(instance): if isinstance(instance, Organization): return instance.slug @@ -147,6 +158,7 @@ def organization_slug(instance): except Exception: return None + def get_instance_diff(old_data, data): ignore_related_fields = ( "labels", @@ -164,7 +176,8 @@ def get_instance_diff(old_data, data): return diff -def _cleanup_fields(obj): + +def _cleanup_fields(obj: dict[str, Any]) -> dict[str, Any]: fields=( "slug", "id", @@ -183,6 +196,7 @@ def _cleanup_fields(obj): "url", "issues", "attributes", + "key", ) subfields=( "url", @@ -198,6 +212,7 @@ def _cleanup_fields(obj): data[k] = v return data + def _get_object_name(instance): if isinstance(instance, Organization) or \ isinstance(instance, Project) or \ @@ -217,34 +232,32 @@ def _get_object_name(instance): return None + +SERIALIZERS = [ + (Organization, OrganizationReadSerializer), + (Project, ProjectReadSerializer), + (Task, TaskReadSerializer), + (Job, JobReadSerializer), + (User, BasicUserSerializer), + (CloudStorage, 
CloudStorageReadSerializer), + (Issue, IssueReadSerializer), + (Comment, CommentReadSerializer), + (Label, LabelSerializer), + (Membership, MembershipReadSerializer), + (Invitation, InvitationReadSerializer), + (Webhook, WebhookReadSerializer), +] + + def get_serializer(instance): context = { "request": get_current_request() } serializer = None - if isinstance(instance, Organization): - serializer = OrganizationReadSerializer(instance=instance, context=context) - if isinstance(instance, Project): - serializer = ProjectReadSerializer(instance=instance, context=context) - if isinstance(instance, Task): - serializer = TaskReadSerializer(instance=instance, context=context) - if isinstance(instance, Job): - serializer = JobReadSerializer(instance=instance, context=context) - if isinstance(instance, User): - serializer = BasicUserSerializer(instance=instance, context=context) - if isinstance(instance, CloudStorage): - serializer = CloudStorageReadSerializer(instance=instance, context=context) - if isinstance(instance, Issue): - serializer = IssueReadSerializer(instance=instance, context=context) - if isinstance(instance, Comment): - serializer = CommentReadSerializer(instance=instance, context=context) - if isinstance(instance, Label): - serializer = LabelSerializer(instance=instance, context=context) - if isinstance(instance, Membership): - serializer = MembershipReadSerializer(instance=instance, context=context) - if isinstance(instance, Invitation): - serializer = InvitationReadSerializer(instance=instance, context=context) + for model, serializer_class in SERIALIZERS: + if isinstance(instance, model): + serializer = serializer_class(instance=instance, context=context) return serializer @@ -254,6 +267,7 @@ def get_serializer_without_url(instance): serializer.fields.pop("url", None) return serializer + def handle_create(scope, instance, **kwargs): oid = organization_id(instance) oslug = organization_slug(instance) @@ -288,6 +302,7 @@ def handle_create(scope, instance, **kwargs): payload=payload, ) + def handle_update(scope, instance, old_instance, **kwargs): oid = organization_id(instance) oslug = organization_slug(instance) @@ -322,12 +337,14 @@ def handle_update(scope, instance, old_instance, **kwargs): payload={"old_value": change["old_value"]}, ) + def handle_delete(scope, instance, store_in_deletion_cache=False, **kwargs): deletion_cache = get_cache() + instance_id = getattr(instance, "id", None) if store_in_deletion_cache: deletion_cache.set( instance.__class__, - instance.id, + instance_id, { "oid": organization_id(instance), "oslug": organization_slug(instance), @@ -338,7 +355,7 @@ def handle_delete(scope, instance, store_in_deletion_cache=False, **kwargs): ) return - instance_meta_info = deletion_cache.pop(instance.__class__, instance.id) + instance_meta_info = deletion_cache.pop(instance.__class__, instance_id) if instance_meta_info: oid = instance_meta_info["oid"] oslug = instance_meta_info["oslug"] @@ -360,7 +377,7 @@ def handle_delete(scope, instance, store_in_deletion_cache=False, **kwargs): scope=scope, request_id=request_id(), on_commit=True, - obj_id=getattr(instance, 'id', None), + obj_id=instance_id, obj_name=_get_object_name(instance), org_id=oid, org_slug=oslug, @@ -372,15 +389,12 @@ def handle_delete(scope, instance, store_in_deletion_cache=False, **kwargs): user_email=uemail, ) + def handle_annotations_change(instance, annotations, action, **kwargs): def filter_data(data): filtered_data = { "id": data["id"], - "frame": data["frame"], - "attributes": data["attributes"], } 
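+        # only the object id is kept in annotation event payloads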
- if label_id := data.get("label_id"): - filtered_data["label_id"] = label_id return filtered_data diff --git a/cvat/apps/events/rules/events.rego b/cvat/apps/events/rules/events.rego index 0152ec721ba8..58ec43763b2f 100644 --- a/cvat/apps/events/rules/events.rego +++ b/cvat/apps/events/rules/events.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/events/rules/tests/generators/events_test.gen.rego.py b/cvat/apps/events/rules/tests/generators/events_test.gen.rego.py index da9d54d79e22..dee2d4a68963 100644 --- a/cvat/apps/events/rules/tests/generators/events_test.gen.rego.py +++ b/cvat/apps/events/rules/tests/generators/events_test.gen.rego.py @@ -42,7 +42,7 @@ def read_rules(name): SCOPES = list({rule["scope"] for rule in simple_rules}) CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [True, False] diff --git a/cvat/apps/events/serializers.py b/cvat/apps/events/serializers.py index 05d89d41ef18..9b70f17429c9 100644 --- a/cvat/apps/events/serializers.py +++ b/cvat/apps/events/serializers.py @@ -27,20 +27,23 @@ class EventSerializer(serializers.Serializer): org_slug = serializers.CharField(required=False, allow_null=True) payload = serializers.CharField(required=False, allow_null=True) + class ClientEventsSerializer(serializers.Serializer): - ALLOWED_SCOPES = frozenset(( - 'load:cvat', 'load:job', 'save:job','load:workspace', - 'upload:annotations', # TODO: remove in next releases - 'lock:object', # TODO: remove in next releases - 'change:attribute', # TODO: remove in next releases - 'change:label', # TODO: remove in next releases - 'send:exception', 'join:objects', 'change:frame', - 'draw:object', 'paste:object', 'copy:object', 'propagate:object', - 'drag:object', 'resize:object', 'delete:object', - 'merge:objects', 'split:objects', 'group:objects', 'slice:object', - 'zoom:image', 'fit:image', 'rotate:image', 'action:undo', 'action:redo', - 'debug:info', 'run:annotations_action', 'click:element' - )) + ALLOWED_SCOPES = { + 'client': frozenset(( + 'load:cvat', 'load:job', 'save:job','load:workspace', + 'upload:annotations', # TODO: remove in next releases + 'lock:object', # TODO: remove in next releases + 'change:attribute', # TODO: remove in next releases + 'change:label', # TODO: remove in next releases + 'send:exception', 'join:objects', 'change:frame', + 'draw:object', 'paste:object', 'copy:object', 'propagate:object', + 'drag:object', 'resize:object', 'delete:object', + 'merge:objects', 'split:objects', 'group:objects', 'slice:object', + 'zoom:image', 'fit:image', 'rotate:image', 'action:undo', 'action:redo', + 'debug:info', 'run:annotations_action', 'click:element', + )), + } events = EventSerializer(many=True, default=[]) previous_event = EventSerializer(default=None, allow_null=True, write_only=True) @@ -50,8 +53,13 @@ def to_internal_value(self, data): data = super().to_internal_value(data) request = self.context.get("request") org = request.iam_context["organization"] - org_id = getattr(org, "id", None) - org_slug = getattr(org, "slug", None) + user_and_org_data = { + "org_id": getattr(org, "id", None), + "org_slug": getattr(org, "slug", None), + "user_id": request.user.id, + "user_name": 
request.user.username, + "user_email": request.user.email, + } send_time = data["timestamp"] receive_time = datetime.datetime.now(datetime.timezone.utc) @@ -62,8 +70,9 @@ def to_internal_value(self, data): for event in data["events"]: scope = event["scope"] - if scope not in ClientEventsSerializer.ALLOWED_SCOPES: - raise serializers.ValidationError({ "scope": f"Event scope **{scope}** is not allowed from client" }) + source = event.get("source", "client") + if scope not in ClientEventsSerializer.ALLOWED_SCOPES.get(source, []): + raise serializers.ValidationError({"scope": f"Event scope **{scope}** is not allowed from {source}"}) try: payload = json.loads(event.get("payload", "{}")) @@ -72,13 +81,9 @@ def to_internal_value(self, data): event.update({ "timestamp": event["timestamp"] + time_correction, - "source": "client", - "org_id": org_id, - "org_slug": org_slug, - "user_id": request.user.id, - "user_name": request.user.username, - "user_email": request.user.email, + "source": source, "payload": json.dumps(payload), + **(user_and_org_data if source == 'client' else {}) }) return data diff --git a/cvat/apps/events/signals.py b/cvat/apps/events/signals.py index 25d320c35e1d..c304fc69b61c 100644 --- a/cvat/apps/events/signals.py +++ b/cvat/apps/events/signals.py @@ -2,26 +2,30 @@ # # SPDX-License-Identifier: MIT -from django.dispatch import receiver -from django.db.models.signals import pre_save, post_save, post_delete from django.core.exceptions import ObjectDoesNotExist +from django.db.models.signals import post_delete, post_save, pre_save +from django.dispatch import receiver from cvat.apps.engine.models import ( - TimestampedModel, - Project, - Task, - Job, - User, CloudStorage, - Issue, Comment, + Issue, + Job, Label, + Project, + Task, + TimestampedModel, + User, ) -from cvat.apps.organizations.models import Organization +from cvat.apps.organizations.models import Invitation, Membership, Organization +from cvat.apps.webhooks.models import Webhook -from .handlers import handle_update, handle_create, handle_delete from .event import EventScopeChoice, event_scope +from .handlers import handle_create, handle_delete, handle_update + +@receiver(pre_save, sender=Webhook, dispatch_uid="webhook:update_receiver") +@receiver(pre_save, sender=Membership, dispatch_uid="membership:update_receiver") @receiver(pre_save, sender=Organization, dispatch_uid="organization:update_receiver") @receiver(pre_save, sender=Project, dispatch_uid="project:update_receiver") @receiver(pre_save, sender=Task, dispatch_uid="task:update_receiver") @@ -34,7 +38,8 @@ def resource_update(sender, *, instance, update_fields, **kwargs): if ( isinstance(instance, TimestampedModel) - and update_fields and list(update_fields) == ["updated_date"] + and update_fields + and list(update_fields) == ["updated_date"] ): # This is an optimization for the common case where only the date is bumped # (see `TimestampedModel.touch`). 
Since the actual update of the field will @@ -57,6 +62,10 @@ def resource_update(sender, *, instance, update_fields, **kwargs): handle_update(scope=scope, instance=instance, old_instance=old_instance, **kwargs) + +@receiver(post_save, sender=Webhook, dispatch_uid="webhook:create_receiver") +@receiver(post_save, sender=Membership, dispatch_uid="membership:create_receiver") +@receiver(post_save, sender=Invitation, dispatch_uid="invitation:create_receiver") @receiver(post_save, sender=Organization, dispatch_uid="organization:create_receiver") @receiver(post_save, sender=Project, dispatch_uid="project:create_receiver") @receiver(post_save, sender=Task, dispatch_uid="task:create_receiver") @@ -78,6 +87,10 @@ def resource_create(sender, instance, created, **kwargs): handle_create(scope=scope, instance=instance, **kwargs) + +@receiver(post_delete, sender=Webhook, dispatch_uid="webhook:delete_receiver") +@receiver(post_delete, sender=Membership, dispatch_uid="membership:delete_receiver") +@receiver(post_delete, sender=Invitation, dispatch_uid="invitation:delete_receiver") @receiver(post_delete, sender=Organization, dispatch_uid="organization:delete_receiver") @receiver(post_delete, sender=Project, dispatch_uid="project:delete_receiver") @receiver(post_delete, sender=Task, dispatch_uid="task:delete_receiver") diff --git a/cvat/apps/events/tests/test_events.py b/cvat/apps/events/tests/test_events.py index 990daa1ea325..81b054171dce 100644 --- a/cvat/apps/events/tests/test_events.py +++ b/cvat/apps/events/tests/test_events.py @@ -5,7 +5,7 @@ import json import unittest from datetime import datetime, timedelta, timezone -from typing import List, Optional +from typing import Optional from django.contrib.auth import get_user_model from django.test import RequestFactory @@ -42,7 +42,7 @@ def _working_time(event: dict) -> int: return payload["working_time"] @staticmethod - def _deserialize(events: List[dict], previous_event: Optional[dict] = None) -> List[dict]: + def _deserialize(events: list[dict], previous_event: Optional[dict] = None) -> list[dict]: request = RequestFactory().post("/api/events") request.user = get_user_model()(id=100, username="testuser", email="testuser@example.org") request.iam_context = { diff --git a/cvat/apps/iam/admin.py b/cvat/apps/iam/admin.py index 1cfceb19d600..648e15dc2da4 100644 --- a/cvat/apps/iam/admin.py +++ b/cvat/apps/iam/admin.py @@ -1,4 +1,5 @@ # Copyright (C) 2021-2022 Intel Corporation +# Copyright (C) 2024 CVAT.ai Corporation # # SPDX-License-Identifier: MIT @@ -7,7 +8,19 @@ from django.contrib.auth.admin import GroupAdmin, UserAdmin from django.utils.translation import gettext_lazy as _ +from cvat.apps.engine.models import Profile + + +class ProfileInline(admin.StackedInline): + model = Profile + + fieldsets = ( + (None, {'fields': ('has_analytics_access', )}), + ) + + class CustomUserAdmin(UserAdmin): + inlines = (ProfileInline,) list_display = ("username", "email", "first_name", "last_name", "is_active", "is_staff") fieldsets = ( (None, {'fields': ('username', 'password')}), diff --git a/cvat/apps/iam/migrations/0001_remove_business_group.py b/cvat/apps/iam/migrations/0001_remove_business_group.py new file mode 100644 index 000000000000..2bf1a56b4065 --- /dev/null +++ b/cvat/apps/iam/migrations/0001_remove_business_group.py @@ -0,0 +1,31 @@ +# Generated by Django 4.2.16 on 2024-10-30 12:03 +from django.conf import settings +from django.db import migrations + + +BUSINESS_GROUP_NAME = "business" +USER_GROUP_NAME = "user" + + +def delete_business_group(apps, 
schema_editor): + Group = apps.get_model('auth', 'Group') + User = apps.get_model(settings.AUTH_USER_MODEL) + + if user_group := Group.objects.filter(name=USER_GROUP_NAME).first(): + user_group.user_set.add(*User.objects.filter(groups__name=BUSINESS_GROUP_NAME)) + + Group.objects.filter(name=BUSINESS_GROUP_NAME).delete() + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ] + + operations = [ + migrations.RunPython( + delete_business_group, + reverse_code=migrations.RunPython.noop, + ), + ] diff --git a/cvat/apps/iam/migrations/__init__.py b/cvat/apps/iam/migrations/__init__.py new file mode 100644 index 000000000000..bd6d6576ecf2 --- /dev/null +++ b/cvat/apps/iam/migrations/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT diff --git a/cvat/apps/iam/permissions.py b/cvat/apps/iam/permissions.py index 366f85192a8d..d4925426724a 100644 --- a/cvat/apps/iam/permissions.py +++ b/cvat/apps/iam/permissions.py @@ -8,14 +8,15 @@ import importlib import operator from abc import ABCMeta, abstractmethod +from collections.abc import Sequence from enum import Enum from pathlib import Path -from typing import Any, Dict, List, Optional, Sequence, TypeVar +from typing import Any, Optional, TypeVar from attrs import define, field from django.apps import AppConfig from django.conf import settings -from django.db.models import Q, Model +from django.db.models import Model, Q from rest_framework.exceptions import PermissionDenied from rest_framework.permissions import BasePermission @@ -24,14 +25,17 @@ from .utils import add_opa_rules_path + class StrEnum(str, Enum): def __str__(self) -> str: return self.value + @define class PermissionResult: allow: bool - reasons: List[str] = field(factory=list) + reasons: list[str] = field(factory=list) + def get_organization(request, obj): # Try to get organization from an object otherwise, return the organization that is specified in query parameters @@ -56,6 +60,7 @@ def get_organization(request, obj): return request.iam_context['organization'] + def get_membership(request, organization): if organization is None: return None @@ -66,6 +71,7 @@ def get_membership(request, organization): is_active=True ).first() + def build_iam_context(request, organization: Optional[Organization], membership: Optional[Membership]): return { 'user_id': request.user.id, @@ -78,7 +84,7 @@ def build_iam_context(request, organization: Optional[Organization], membership: } -def get_iam_context(request, obj) -> Dict[str, Any]: +def get_iam_context(request, obj) -> dict[str, Any]: organization = get_organization(request, obj) membership = get_membership(request, organization) @@ -126,7 +132,7 @@ def __init__(self, **kwargs): 'auth': { 'user': { 'id': self.user_id, - 'privilege': self.group_name + 'privilege': self.group_name, }, 'organization': { 'id': self.org_id, @@ -193,9 +199,9 @@ def filter(self, queryset): q_objects.append(Q()) # By default, a QuerySet will not eliminate duplicate rows. If your - # query spans multiple tables (e.g. members__user_id, owner_id), it’s + # query spans multiple tables (e.g. members__user_id, owner_id), it's # possible to get duplicate results when a QuerySet is evaluated. - # That’s when you’d use distinct(). + # That's when you'd use distinct(). 
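The 0001_remove_business_group.py migration above follows a common data-migration pattern: merge the members of a retiring group into a fallback group, then delete the retiring group. A sketch of that pattern under hypothetical group names; reverse_code is a noop because the membership merge is lossy and cannot be undone reliably.

from django.conf import settings
from django.db import migrations

OLD_GROUP = "legacy"
NEW_GROUP = "default"

def merge_groups(apps, schema_editor):
    # Use historical models so the migration stays valid as the code evolves.
    Group = apps.get_model("auth", "Group")
    User = apps.get_model(settings.AUTH_USER_MODEL)

    if fallback := Group.objects.filter(name=NEW_GROUP).first():
        fallback.user_set.add(*User.objects.filter(groups__name=OLD_GROUP))

    Group.objects.filter(name=OLD_GROUP).delete()

class Migration(migrations.Migration):
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.RunPython(merge_groups, reverse_code=migrations.RunPython.noop),
    ]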
return queryset.filter(q_objects[0]).distinct() @classmethod @@ -219,11 +225,14 @@ def get_per_field_update_scopes(cls, request, scopes_per_field): return scopes + T = TypeVar('T', bound=Model) + def is_public_obj(obj: T) -> bool: return getattr(obj, "is_public", False) + class PolicyEnforcer(BasePermission): # pylint: disable=no-self-use def check_permission(self, request, view, obj) -> bool: @@ -258,13 +267,15 @@ def is_metadata_request(request, view): return request.method == 'OPTIONS' \ or (request.method == 'POST' and view.action == 'metadata' and len(request.data) == 0) + class IsAuthenticatedOrReadPublicResource(BasePermission): def has_object_permission(self, request, view, obj) -> bool: return bool( - request.user and request.user.is_authenticated or - request.method == 'GET' and is_public_obj(obj) + (request.user and request.user.is_authenticated) or + (request.method == 'GET' and is_public_obj(obj)) ) + def load_app_permissions(config: AppConfig) -> None: """ Ensures that permissions and OPA rules from the given app are loaded. diff --git a/cvat/apps/iam/rules/tests/generate_tests.py b/cvat/apps/iam/rules/tests/generate_tests.py index 254930e73d61..729de6732eb2 100755 --- a/cvat/apps/iam/rules/tests/generate_tests.py +++ b/cvat/apps/iam/rules/tests/generate_tests.py @@ -7,9 +7,10 @@ import subprocess import sys from argparse import ArgumentParser, Namespace +from collections.abc import Sequence from concurrent.futures import ThreadPoolExecutor from functools import partial -from typing import Optional, Sequence +from typing import Optional from pathlib import Path REPO_ROOT = Path(__file__).resolve().parents[5] diff --git a/cvat/apps/iam/rules/utils.rego b/cvat/apps/iam/rules/utils.rego index 4bd64f0ae108..148b2ec1a2a1 100644 --- a/cvat/apps/iam/rules/utils.rego +++ b/cvat/apps/iam/rules/utils.rego @@ -4,7 +4,6 @@ import rego.v1 # Groups ADMIN := "admin" -BUSINESS := "business" USER := "user" WORKER := "worker" @@ -65,7 +64,6 @@ UPDATE_VALIDATION_LAYOUT := "update:validation_layout" get_priority(privilege) := { ADMIN: 0, - BUSINESS: 50, USER: 75, WORKER: 100, null: 1000 @@ -79,10 +77,6 @@ is_admin if { input.auth.user.privilege == ADMIN } -is_business if { - input.auth.user.privilege == BUSINESS -} - is_user if { input.auth.user.privilege == USER } diff --git a/cvat/apps/iam/serializers.py b/cvat/apps/iam/serializers.py index 862712454de0..967b696a4f21 100644 --- a/cvat/apps/iam/serializers.py +++ b/cvat/apps/iam/serializers.py @@ -19,7 +19,7 @@ from django.contrib.auth.models import User from drf_spectacular.utils import extend_schema_field -from typing import Optional, Union, Dict +from typing import Optional, Union from cvat.apps.iam.forms import ResetPasswordFormEx from cvat.apps.iam.utils import get_dummy_user @@ -32,11 +32,11 @@ class RegisterSerializerEx(RegisterSerializer): key = serializers.SerializerMethodField() @extend_schema_field(serializers.BooleanField) - def get_email_verification_required(self, obj: Union[Dict, User]) -> bool: + def get_email_verification_required(self, obj: Union[dict, User]) -> bool: return allauth_settings.EMAIL_VERIFICATION == allauth_settings.EmailVerificationMethod.MANDATORY @extend_schema_field(serializers.CharField(allow_null=True)) - def get_key(self, obj: Union[Dict, User]) -> Optional[str]: + def get_key(self, obj: Union[dict, User]) -> Optional[str]: key = None if isinstance(obj, User) and allauth_settings.EMAIL_VERIFICATION != \ allauth_settings.EmailVerificationMethod.MANDATORY: diff --git a/cvat/apps/iam/signals.py 
b/cvat/apps/iam/signals.py index 28159cddc745..73f919a1a4a4 100644 --- a/cvat/apps/iam/signals.py +++ b/cvat/apps/iam/signals.py @@ -42,6 +42,9 @@ def create_user(sender, user=None, ldap_user=None, **kwargs): if role == settings.IAM_ADMIN_ROLE: user.is_staff = user.is_superuser = True break + # add default group if no other group has been assigned + if not len(user_groups): + user_groups.append(Group.objects.get(name=settings.IAM_DEFAULT_ROLE)) # It is important to save the user before adding groups. Please read # https://django-auth-ldap.readthedocs.io/en/latest/users.html#populating-users diff --git a/cvat/apps/iam/utils.py b/cvat/apps/iam/utils.py index 9cd122ab1ba3..8095902769f3 100644 --- a/cvat/apps/iam/utils.py +++ b/cvat/apps/iam/utils.py @@ -1,16 +1,19 @@ from pathlib import Path -from typing import Tuple import functools import hashlib +import importlib import io import tarfile +from django.conf import settings +from django.contrib.sessions.backends.base import SessionBase + _OPA_RULES_PATHS = { Path(__file__).parent / 'rules', } @functools.lru_cache(maxsize=None) -def get_opa_bundle() -> Tuple[bytes, str]: +def get_opa_bundle() -> tuple[bytes, str]: bundle_file = io.BytesIO() with tarfile.open(fileobj=bundle_file, mode='w:gz') as tar: @@ -43,3 +46,7 @@ def get_dummy_user(email): if email.verified: return None return user + +def clean_up_sessions() -> None: + SessionStore: type[SessionBase] = importlib.import_module(settings.SESSION_ENGINE).SessionStore + SessionStore.clear_expired() diff --git a/cvat/apps/lambda_manager/rules/lambda.rego b/cvat/apps/lambda_manager/rules/lambda.rego index 2829860c0932..7b3b6c828975 100644 --- a/cvat/apps/lambda_manager/rules/lambda.rego +++ b/cvat/apps/lambda_manager/rules/lambda.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/lambda_manager/rules/tests/generators/lambda_test.gen.rego.py b/cvat/apps/lambda_manager/rules/tests/generators/lambda_test.gen.rego.py index 5a669c5f49fc..94f694988a38 100644 --- a/cvat/apps/lambda_manager/rules/tests/generators/lambda_test.gen.rego.py +++ b/cvat/apps/lambda_manager/rules/tests/generators/lambda_test.gen.rego.py @@ -41,7 +41,7 @@ def read_rules(name): SCOPES = list({rule["scope"] for rule in simple_rules}) CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] diff --git a/cvat/apps/lambda_manager/serializers.py b/cvat/apps/lambda_manager/serializers.py index 4108b4e97ad9..ab8809bd7cc8 100644 --- a/cvat/apps/lambda_manager/serializers.py +++ b/cvat/apps/lambda_manager/serializers.py @@ -24,13 +24,11 @@ class FunctionCallRequestSerializer(serializers.Serializer): function = serializers.CharField(help_text="The name of the function to execute") task = serializers.IntegerField(help_text="The id of the task to be annotated") job = serializers.IntegerField(required=False, help_text="The id of the job to be annotated") - quality = serializers.ChoiceField(choices=['compressed', 'original'], default="original", - help_text="The quality of the images to use in the model run" - ) max_distance = serializers.IntegerField(required=False) threshold = serializers.FloatField(required=False) cleanup = serializers.BooleanField(help_text="Whether 
existing annotations should be removed", default=False) - convMaskToPoly = serializers.BooleanField(default=False) # TODO: use lowercase naming + convMaskToPoly = serializers.BooleanField(required=False, source="conv_mask_to_poly", write_only=True, help_text="Deprecated; use conv_mask_to_poly instead") + conv_mask_to_poly = serializers.BooleanField(required=False, help_text="Convert mask shapes to polygons") mapping = serializers.DictField(child=LabelMappingEntrySerializer(), required=False, help_text="Label mapping from the model to the task labels" ) diff --git a/cvat/apps/lambda_manager/tests/test_lambda.py b/cvat/apps/lambda_manager/tests/test_lambda.py index 57c74cf2c529..f9292b278b45 100644 --- a/cvat/apps/lambda_manager/tests/test_lambda.py +++ b/cvat/apps/lambda_manager/tests/test_lambda.py @@ -5,7 +5,7 @@ from collections import Counter, OrderedDict from itertools import groupby -from typing import Dict, Optional +from typing import Optional from unittest import mock, skip import json import os @@ -133,7 +133,7 @@ def _invoke_function(self, func, payload): @classmethod def _create_db_users(cls): (group_admin, _) = Group.objects.get_or_create(name="admin") - (group_user, _) = Group.objects.get_or_create(name="business") + (group_user, _) = Group.objects.get_or_create(name="user") user_admin = User.objects.create_superuser(username="admin", email="", password="admin") @@ -368,7 +368,6 @@ def test_api_v2_lambda_requests_read(self): "task": self.main_task["id"], "cleanup": True, "threshold": 55, - "quality": "original", "mapping": { "car": { "name": "car" }, }, @@ -447,7 +446,6 @@ def test_api_v2_lambda_requests_create(self): "task": self.main_task["id"], "cleanup": True, "threshold": 55, - "quality": "original", "mapping": { "car": { "name": "car" }, }, @@ -456,7 +454,6 @@ def test_api_v2_lambda_requests_create(self): "function": id_func, "task": self.assigneed_to_user_task["id"], "cleanup": False, - "quality": "compressed", "max_distance": 70, "mapping": { "car": { "name": "car" }, @@ -769,7 +766,6 @@ def test_api_v2_lambda_functions_create_reid(self): OrderedDict([('attributes', []), ('frame', 1), ('group', None), ('id', 11260), ('label_id', 8), ('occluded', False), ('points', [1076.0, 199.0, 1218.0, 593.0]), ('source', 'auto'), ('type', 'rectangle'), ('z_order', 0)]), OrderedDict([('attributes', []), ('frame', 1), ('group', None), ('id', 11261), ('label_id', 8), ('occluded', False), ('points', [924.0, 177.0, 1090.0, 615.0]), ('source', 'auto'), ('type', 'rectangle'), ('z_order', 0)]), ], - "quality": None, "threshold": 0.5, "max_distance": 55, } @@ -785,7 +781,6 @@ def test_api_v2_lambda_functions_create_reid(self): OrderedDict([('attributes', []), ('frame', 1), ('group', None), ('id', 11260), ('label_id', 8), ('occluded', False), ('points', [1076.0, 199.0, 1218.0, 593.0]), ('source', 'auto'), ('type', 'rectangle'), ('z_order', 0)]), OrderedDict([('attributes', []), ('frame', 1), ('group', 0), ('id', 11398), ('label_id', 8), ('occluded', False), ('points', [184.3935546875, 211.5048828125, 331.64968722073354, 97.27792672028772, 445.87667560321825, 126.17873100983161, 454.13404825737416, 691.8087578194827, 180.26452189455085]), ('source', 'manual'), ('type', 'polygon'), ('z_order', 0)]), ], - "quality": None, } response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_reid_with_response_data}", self.admin, data_main_task) @@ -829,42 +824,11 @@ def test_api_v2_lambda_functions_create_negative(self): self.assertEqual(response.status_code, 
status.HTTP_500_INTERNAL_SERVER_ERROR) - def test_api_v2_lambda_functions_create_quality(self): - qualities = [None, "original", "compressed"] - - for quality in qualities: - data = { - "task": self.main_task["id"], - "frame": 0, - "cleanup": True, - "quality": quality, - "mapping": { - "car": { "name": "car" }, - }, - } - - response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data) - self.assertEqual(response.status_code, status.HTTP_200_OK) - - data = { - "task": self.main_task["id"], - "frame": 0, - "cleanup": True, - "quality": "test-error-quality", - "mapping": { - "car": { "name": "car" }, - }, - } - - response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data) - self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - def test_api_v2_lambda_functions_convert_mask_to_rle(self): data_main_task = { "function": id_function_detector, "task": self.main_task["id"], "cleanup": True, - "quality": "original", "mapping": { "car": { "name": "car" }, }, @@ -1476,7 +1440,7 @@ class Issue4996_Cases(_LambdaTestCaseBase): # We need to check that job assignee can call functions in the assigned jobs # This requires to pass the job id in the call request. - def _create_org(self, *, owner: int, members: Dict[int, str] = None) -> dict: + def _create_org(self, *, owner: int, members: dict[int, str] = None) -> dict: org = self._post_request('/api/organizations', user=owner, data={ "slug": "testorg", "name": "test Org", diff --git a/cvat/apps/lambda_manager/views.py b/cvat/apps/lambda_manager/views.py index 143537985fd7..559ef29813b5 100644 --- a/cvat/apps/lambda_manager/views.py +++ b/cvat/apps/lambda_manager/views.py @@ -12,7 +12,7 @@ from copy import deepcopy from datetime import timedelta from functools import wraps -from typing import Any, Dict, Optional +from typing import Any, Optional import datumaro.util.mask_tools as mask_tools import django_rq @@ -32,7 +32,7 @@ from rest_framework.request import Request import cvat.apps.dataset_manager as dm -from cvat.apps.engine.frame_provider import FrameQuality, TaskFrameProvider +from cvat.apps.engine.frame_provider import TaskFrameProvider from cvat.apps.engine.models import ( Job, ShapeType, SourceType, Task, Label, RequestAction, RequestTarget ) @@ -231,7 +231,7 @@ def to_dict(self): def invoke( self, db_task: Task, - data: Dict[str, Any], + data: dict[str, Any], *, db_job: Optional[Job] = None, is_interactive: Optional[bool] = False, @@ -257,13 +257,12 @@ def mandatory_arg(name: str) -> Any: threshold = data.get("threshold") if threshold: payload.update({ "threshold": threshold }) - quality = data.get("quality") mapping = data.get("mapping", {}) model_labels = self.labels task_labels = db_task.get_labels(prefetch=True) - def labels_compatible(model_label: Dict, task_label: Label) -> bool: + def labels_compatible(model_label: dict, task_label: Label) -> bool: model_type = model_label['type'] db_type = task_label.type compatible_types = [[ShapeType.MASK, ShapeType.POLYGON]] @@ -387,19 +386,19 @@ def validate_attributes_mapping(attributes_mapping, model_attributes, db_attribu if self.kind == FunctionKind.DETECTOR: payload.update({ - "image": self._get_image(db_task, mandatory_arg("frame"), quality) + "image": self._get_image(db_task, mandatory_arg("frame")) }) elif self.kind == FunctionKind.INTERACTOR: payload.update({ - "image": self._get_image(db_task, mandatory_arg("frame"), quality), + "image": self._get_image(db_task, mandatory_arg("frame")), "pos_points": 
mandatory_arg("pos_points"), "neg_points": mandatory_arg("neg_points"), "obj_bbox": data.get("obj_bbox", None) }) elif self.kind == FunctionKind.REID: payload.update({ - "image0": self._get_image(db_task, mandatory_arg("frame0"), quality), - "image1": self._get_image(db_task, mandatory_arg("frame1"), quality), + "image0": self._get_image(db_task, mandatory_arg("frame0")), + "image1": self._get_image(db_task, mandatory_arg("frame1")), "boxes0": mandatory_arg("boxes0"), "boxes1": mandatory_arg("boxes1") }) @@ -410,7 +409,7 @@ def validate_attributes_mapping(attributes_mapping, model_attributes, db_attribu }) elif self.kind == FunctionKind.TRACKER: payload.update({ - "image": self._get_image(db_task, mandatory_arg("frame"), quality), + "image": self._get_image(db_task, mandatory_arg("frame")), "shapes": data.get("shapes", []), "states": data.get("states", []) }) @@ -487,19 +486,9 @@ def transform_attributes(input_attributes, attr_mapping, db_attributes): return response - def _get_image(self, db_task, frame, quality): - if quality is None or quality == "original": - quality = FrameQuality.ORIGINAL - elif quality == "compressed": - quality = FrameQuality.COMPRESSED - else: - raise ValidationError( - '`{}` lambda function was run '.format(self.id) + - 'with wrong arguments (quality={})'.format(quality), - code=status.HTTP_400_BAD_REQUEST) - + def _get_image(self, db_task, frame): frame_provider = TaskFrameProvider(db_task) - image = frame_provider.get_frame(frame, quality=quality) + image = frame_provider.get_frame(frame) return base64.b64encode(image.data.getvalue()).decode('utf-8') @@ -523,7 +512,7 @@ def get_jobs(self): return [LambdaJob(job) for job in jobs if job and job.meta.get("lambda")] def enqueue(self, - lambda_func, threshold, task, quality, mapping, cleanup, conv_mask_to_poly, max_distance, request, + lambda_func, threshold, task, mapping, cleanup, conv_mask_to_poly, max_distance, request, *, job: Optional[int] = None ) -> LambdaJob: @@ -576,7 +565,6 @@ def enqueue(self, "threshold": threshold, "task": task, "job": job, - "quality": quality, "cleanup": cleanup, "conv_mask_to_poly": conv_mask_to_poly, "mapping": mapping, @@ -666,10 +654,9 @@ def _call_detector( cls, function: LambdaFunction, db_task: Task, - labels: Dict[str, Dict[str, Any]], - quality: str, + labels: dict[str, dict[str, Any]], threshold: float, - mapping: Optional[Dict[str, str]], + mapping: Optional[dict[str, str]], conv_mask_to_poly: bool, *, db_job: Optional[Job] = None @@ -799,7 +786,7 @@ def _map(sublabel_body): continue annotations = function.invoke(db_task, db_job=db_job, data={ - "frame": frame, "quality": quality, "mapping": mapping, + "frame": frame, "mapping": mapping, "threshold": threshold }) @@ -854,7 +841,6 @@ def _call_reid( cls, function: LambdaFunction, db_task: Task, - quality: str, threshold: float, max_distance: int, *, @@ -887,7 +873,7 @@ def _call_reid( boxes1 = boxes_by_frame[frame1] if boxes0 and boxes1: matching = function.invoke(db_task, db_job=db_job, data={ - "frame0": frame0, "frame1": frame1, "quality": quality, + "frame0": frame0, "frame1": frame1, "boxes0": boxes0, "boxes1": boxes1, "threshold": threshold, "max_distance": max_distance}) @@ -947,7 +933,7 @@ def _call_reid( dm.task.put_task_data(db_task.id, serializer.data) @classmethod - def __call__(cls, function, task: int, quality: str, cleanup: bool, **kwargs): + def __call__(cls, function, task: int, cleanup: bool, **kwargs): # TODO: need logging db_job = None if job := kwargs.get('job'): @@ -977,11 +963,11 @@ def 
convert_labels(db_labels): labels = convert_labels(db_task.get_labels(prefetch=True)) if function.kind == FunctionKind.DETECTOR: - cls._call_detector(function, db_task, labels, quality, + cls._call_detector(function, db_task, labels, kwargs.get("threshold"), kwargs.get("mapping"), kwargs.get("conv_mask_to_poly"), db_job=db_job) elif function.kind == FunctionKind.REID: - cls._call_reid(function, db_task, quality, + cls._call_reid(function, db_task, kwargs.get("threshold"), kwargs.get("max_distance"), db_job=db_job) def return_response(success_code=status.HTTP_200_OK): @@ -1176,9 +1162,8 @@ def create(self, request): threshold = request_data.get('threshold') task = request_data['task'] job = request_data.get('job', None) - quality = request_data.get("quality") cleanup = request_data.get('cleanup', False) - conv_mask_to_poly = request_data.get('convMaskToPoly', False) + conv_mask_to_poly = request_data.get('conv_mask_to_poly', False) mapping = request_data.get('mapping') max_distance = request_data.get('max_distance') except KeyError as err: @@ -1190,7 +1175,7 @@ def create(self, request): gateway = LambdaGateway() queue = LambdaQueue() lambda_func = gateway.get(function) - rq_job = queue.enqueue(lambda_func, threshold, task, quality, + rq_job = queue.enqueue(lambda_func, threshold, task, mapping, cleanup, conv_mask_to_poly, max_distance, request, job=job) handle_function_call(function, job or task, category="batch") diff --git a/cvat/apps/log_viewer/permissions.py b/cvat/apps/log_viewer/permissions.py index 2d9500a29ea2..d25aa7fe275a 100644 --- a/cvat/apps/log_viewer/permissions.py +++ b/cvat/apps/log_viewer/permissions.py @@ -5,9 +5,12 @@ from django.conf import settings -from cvat.apps.iam.permissions import OpenPolicyAgentPermission, StrEnum +from cvat.apps.iam.permissions import OpenPolicyAgentPermission, StrEnum, get_iam_context + class LogViewerPermission(OpenPolicyAgentPermission): + has_analytics_access: bool + class Scopes(StrEnum): VIEW = 'view' @@ -21,8 +24,21 @@ def create(cls, request, view, obj, iam_context): return permissions - def __init__(self, **kwargs): + @classmethod + def create_base_perm(cls, request, view, scope, iam_context, obj=None, **kwargs): + if not iam_context and request: + iam_context = get_iam_context(request, obj) + return cls( + scope=scope, + obj=obj, + has_analytics_access=request.user.profile.has_analytics_access, + **iam_context, + **kwargs + ) + + def __init__(self, has_analytics_access=False, **kwargs): super().__init__(**kwargs) + self.payload['input']['auth']['user']['has_analytics_access'] = has_analytics_access self.url = settings.IAM_OPA_DATA_URL + '/analytics/allow' @staticmethod @@ -33,6 +49,4 @@ def get_scopes(request, view, obj): }[view.action]] def get_resource(self): - return { - 'visibility': 'public' if settings.RESTRICTIONS['analytics_visibility'] else 'private', - } + return None diff --git a/cvat/apps/log_viewer/rules/analytics.rego b/cvat/apps/log_viewer/rules/analytics.rego index 970a6a3e97d1..f40653f63e2b 100644 --- a/cvat/apps/log_viewer/rules/analytics.rego +++ b/cvat/apps/log_viewer/rules/analytics.rego @@ -9,7 +9,8 @@ import data.utils # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null, +# "has_analytics_access": # }, # "organization": { # "id": , @@ -21,19 +22,10 @@ import data.utils # } # } or null, # }, -# "resource": { -# "visibility": <"public"|"private"> or null, -# } # } default allow := false allow if { - utils.is_admin -} - 
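The analytics.rego change here replaces both the admin shortcut and the resource-visibility rule with a single per-user flag. A plain-Python sketch (hypothetical helper names) of the resulting decision: the has_analytics_access value forwarded in the OPA input is the only thing this policy consults.

def build_opa_input(user_id, privilege, has_analytics_access):
    return {
        "scope": "view",
        "auth": {
            "user": {
                "id": user_id,
                "privilege": privilege,
                "has_analytics_access": has_analytics_access,
            },
        },
    }

def allow(opa_input):
    # Mirrors: allow if { input.auth.user.has_analytics_access }
    return bool(opa_input["auth"]["user"]["has_analytics_access"])

assert allow(build_opa_input(1, "worker", True))      # any privilege, flag set
assert not allow(build_opa_input(2, "admin", False))  # per this policy, even admins need the flag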
-allow if { - input.resource.visibility == utils.PUBLIC - input.scope == utils.VIEW - utils.has_perm(utils.BUSINESS) + input.auth.user.has_analytics_access } diff --git a/cvat/apps/log_viewer/rules/tests/configs/analytics.csv b/cvat/apps/log_viewer/rules/tests/configs/analytics.csv index d38b0514425c..7ff4ea280475 100644 --- a/cvat/apps/log_viewer/rules/tests/configs/analytics.csv +++ b/cvat/apps/log_viewer/rules/tests/configs/analytics.csv @@ -1,3 +1,2 @@ -Scope,Resource,Context,Ownership,Limit,Method,URL,Privilege,Membership -view,Analytics,N/A,N/A,resource['visibility']=='public',GET,"/analytics",business,N/A -view,Analytics,N/A,N/A,,GET,"/analytics",admin,N/A +Scope,Resource,Context,Ownership,Limit,Method,URL,Privilege,Membership,HasAnalyticsAccess +view,Analytics,N/A,N/A,,GET,"/analytics",none,N/A,true diff --git a/cvat/apps/log_viewer/rules/tests/generators/analytics_test.gen.rego.py b/cvat/apps/log_viewer/rules/tests/generators/analytics_test.gen.rego.py index ce4b50a7c8fb..95d566e4b93a 100644 --- a/cvat/apps/log_viewer/rules/tests/generators/analytics_test.gen.rego.py +++ b/cvat/apps/log_viewer/rules/tests/generators/analytics_test.gen.rego.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022 CVAT.ai Corporation +# Copyright (C) 2022-2024 CVAT.ai Corporation # # SPDX-License-Identifier: MIT @@ -41,24 +41,16 @@ def read_rules(name): SCOPES = {rule["scope"] for rule in simple_rules} CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] +HAS_ANALYTICS_ACCESS = [True, False] def RESOURCES(scope): - if scope == "view": - return [ - {"visibility": "public"}, - {"visibility": "private"}, - ] - return [None] -def eval_rule(scope, context, ownership, privilege, membership, data): - if privilege == "admin": - return True - +def eval_rule(scope, context, ownership, privilege, membership, data, has_analytics_access): rules = list(filter(lambda r: scope == r["scope"], simple_rules)) rules = list(filter(lambda r: r["context"] == "na" or context == r["context"], rules)) rules = list(filter(lambda r: r["ownership"] == "na" or ownership == r["ownership"], rules)) @@ -70,17 +62,22 @@ def eval_rule(scope, context, ownership, privilege, membership, data): ) ) rules = list(filter(lambda r: GROUPS.index(privilege) <= GROUPS.index(r["privilege"]), rules)) + rules = list(filter(lambda r: r["hasanalyticsaccess"] in ("na", str(has_analytics_access).lower()), rules)) resource = data["resource"] - rules = list(filter(lambda r: eval(r["limit"], {"resource": resource}), rules)) + rules = list(filter(lambda r: not r["limit"] or eval(r["limit"], {"resource": resource}), rules)) return bool(rules) -def get_data(scope, context, ownership, privilege, membership, resource): +def get_data(scope, context, ownership, privilege, membership, resource, has_analytics_access): data = { "scope": scope, "auth": { - "user": {"id": random.randrange(0, 100), "privilege": privilege}, + "user": { + "id": random.randrange(0, 100), # nosec B311 NOSONAR + "privilege": privilege, + "has_analytics_access": has_analytics_access, + }, "organization": { "id": random.randrange(100, 200), "owner": {"id": random.randrange(200, 300)}, @@ -123,7 +120,7 @@ def _get_name(prefix, **kwargs): return name -def get_name(scope, context, ownership, privilege, membership, resource): +def get_name(scope, context, ownership, privilege, membership, resource, has_analytics_access): return 
_get_name("test", **locals()) @@ -139,16 +136,16 @@ def is_valid(scope, context, ownership, privilege, membership, resource): def gen_test_rego(name): with open(f"{name}_test.gen.rego", "wt") as f: f.write(f"package {name}\nimport rego.v1\n\n") - for scope, context, ownership, privilege, membership in product( - SCOPES, CONTEXTS, OWNERSHIPS, GROUPS, ORG_ROLES + for scope, context, ownership, privilege, membership, has_analytics_access in product( + SCOPES, CONTEXTS, OWNERSHIPS, GROUPS, ORG_ROLES, HAS_ANALYTICS_ACCESS ): for resource in RESOURCES(scope): if not is_valid(scope, context, ownership, privilege, membership, resource): continue - data = get_data(scope, context, ownership, privilege, membership, resource) - test_name = get_name(scope, context, ownership, privilege, membership, resource) - result = eval_rule(scope, context, ownership, privilege, membership, data) + data = get_data(scope, context, ownership, privilege, membership, resource, has_analytics_access) + test_name = get_name(scope, context, ownership, privilege, membership, resource, has_analytics_access) + result = eval_rule(scope, context, ownership, privilege, membership, data, has_analytics_access) f.write( "{test_name} if {{\n {allow} with input as {data}\n}}\n\n".format( test_name=test_name, diff --git a/cvat/apps/organizations/rules/invitations.rego b/cvat/apps/organizations/rules/invitations.rego index 3a51f76128e5..2e15ba4a8637 100644 --- a/cvat/apps/organizations/rules/invitations.rego +++ b/cvat/apps/organizations/rules/invitations.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/organizations/rules/memberships.rego b/cvat/apps/organizations/rules/memberships.rego index c23f3039ff16..09752e4b7007 100644 --- a/cvat/apps/organizations/rules/memberships.rego +++ b/cvat/apps/organizations/rules/memberships.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/organizations/rules/organizations.rego b/cvat/apps/organizations/rules/organizations.rego index 24643feab703..6d0a8c29c19b 100644 --- a/cvat/apps/organizations/rules/organizations.rego +++ b/cvat/apps/organizations/rules/organizations.rego @@ -9,7 +9,7 @@ import data.utils # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": null, # }, @@ -69,11 +69,6 @@ allow if { utils.has_perm(utils.USER) } -allow if { - input.scope == utils.CREATE - utils.has_perm(utils.BUSINESS) -} - filter := [] if { # Django Q object to filter list of entries utils.is_admin } else := qobject if { diff --git a/cvat/apps/organizations/rules/tests/generators/invitations_test.gen.rego.py b/cvat/apps/organizations/rules/tests/generators/invitations_test.gen.rego.py index c3ba86abb75f..bf7edec50713 100644 --- a/cvat/apps/organizations/rules/tests/generators/invitations_test.gen.rego.py +++ b/cvat/apps/organizations/rules/tests/generators/invitations_test.gen.rego.py @@ -41,7 +41,7 @@ def read_rules(name): SCOPES = {rule["scope"] for rule in simple_rules} CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["owner", "invitee", "none"] -GROUPS = ["admin", "business", "user", "worker", 
"none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [False, True] diff --git a/cvat/apps/organizations/rules/tests/generators/memberships_test.gen.rego.py b/cvat/apps/organizations/rules/tests/generators/memberships_test.gen.rego.py index b86548142da7..c74a4a7c992b 100644 --- a/cvat/apps/organizations/rules/tests/generators/memberships_test.gen.rego.py +++ b/cvat/apps/organizations/rules/tests/generators/memberships_test.gen.rego.py @@ -41,7 +41,7 @@ def read_rules(name): SCOPES = {rule["scope"] for rule in simple_rules} CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["self", "none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [False, True] diff --git a/cvat/apps/organizations/rules/tests/generators/organizations_test.gen.rego.py b/cvat/apps/organizations/rules/tests/generators/organizations_test.gen.rego.py index a6c111bfef40..d2a8a6fb653b 100644 --- a/cvat/apps/organizations/rules/tests/generators/organizations_test.gen.rego.py +++ b/cvat/apps/organizations/rules/tests/generators/organizations_test.gen.rego.py @@ -41,7 +41,7 @@ def read_rules(name): SCOPES = {rule["scope"] for rule in simple_rules} CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["owner", "maintainer", "supervisor", "worker", "none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] diff --git a/cvat/apps/profiler.py b/cvat/apps/profiler.py index 45ddbc95f478..0a3885bb00a5 100644 --- a/cvat/apps/profiler.py +++ b/cvat/apps/profiler.py @@ -1,13 +1,16 @@ from django.apps import apps -if apps.is_installed('silk'): +if apps.is_installed("silk"): from silk.profiling.profiler import silk_profile # pylint: disable=unused-import else: from functools import wraps + def silk_profile(name=None): def profile(f): @wraps(f) def wrapped(*args, **kwargs): return f(*args, **kwargs) + return wrapped + return profile diff --git a/cvat/apps/quality_control/apps.py b/cvat/apps/quality_control/apps.py index b1fd560de705..780613bb085c 100644 --- a/cvat/apps/quality_control/apps.py +++ b/cvat/apps/quality_control/apps.py @@ -10,14 +10,6 @@ class QualityControlConfig(AppConfig): name = "cvat.apps.quality_control" def ready(self) -> None: - from django.conf import settings - - from . 
import default_settings - - for key in dir(default_settings): - if key.isupper() and not hasattr(settings, key): - setattr(settings, key, getattr(default_settings, key)) - from cvat.apps.iam.permissions import load_app_permissions load_app_permissions(self) diff --git a/cvat/apps/quality_control/default_settings.py b/cvat/apps/quality_control/default_settings.py deleted file mode 100644 index 06f5aadaffc3..000000000000 --- a/cvat/apps/quality_control/default_settings.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (C) 2023 Intel Corporation -# -# SPDX-License-Identifier: MIT - -import os - -QUALITY_CHECK_JOB_DELAY = int(os.getenv("CVAT_QUALITY_CHECK_JOB_DELAY", 15 * 60)) -"The delay before the next quality check job is queued, in seconds" diff --git a/cvat/apps/quality_control/migrations/0004_qualitysettings_point_size_base.py b/cvat/apps/quality_control/migrations/0004_qualitysettings_point_size_base.py new file mode 100644 index 000000000000..024d263356ac --- /dev/null +++ b/cvat/apps/quality_control/migrations/0004_qualitysettings_point_size_base.py @@ -0,0 +1,24 @@ +# Generated by Django 4.2.15 on 2024-11-06 15:39 + +from django.db import migrations, models + +import cvat.apps.quality_control.models + + +class Migration(migrations.Migration): + + dependencies = [ + ("quality_control", "0003_qualityreport_assignee_last_updated_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="qualitysettings", + name="point_size_base", + field=models.CharField( + choices=[("image_size", "IMAGE_SIZE"), ("group_bbox_size", "GROUP_BBOX_SIZE")], + default=cvat.apps.quality_control.models.PointSizeBase["GROUP_BBOX_SIZE"], + max_length=32, + ), + ), + ] diff --git a/cvat/apps/quality_control/migrations/0005_qualitysettings_match_empty.py b/cvat/apps/quality_control/migrations/0005_qualitysettings_match_empty.py new file mode 100644 index 000000000000..dba6a0c9bc43 --- /dev/null +++ b/cvat/apps/quality_control/migrations/0005_qualitysettings_match_empty.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.15 on 2024-11-05 14:22 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("quality_control", "0004_qualitysettings_point_size_base"), + ] + + operations = [ + migrations.AddField( + model_name="qualitysettings", + name="match_empty_frames", + field=models.BooleanField(default=False), + ), + ] diff --git a/cvat/apps/quality_control/models.py b/cvat/apps/quality_control/models.py index 37f0f1f9612d..a5359e4fe944 100644 --- a/cvat/apps/quality_control/models.py +++ b/cvat/apps/quality_control/models.py @@ -4,9 +4,10 @@ from __future__ import annotations +from collections.abc import Sequence from copy import deepcopy from enum import Enum -from typing import Any, Sequence +from typing import Any from django.core.exceptions import ValidationError from django.db import models @@ -196,6 +197,18 @@ def clean(self) -> None: raise ValidationError(f"Unexpected type value '{self.type}'") +class PointSizeBase(str, Enum): + IMAGE_SIZE = "image_size" + GROUP_BBOX_SIZE = "group_bbox_size" + + def __str__(self) -> str: + return self.value + + @classmethod + def choices(cls): + return tuple((x.value, x.name) for x in cls) + + class QualitySettings(models.Model): task = models.OneToOneField(Task, on_delete=models.CASCADE, related_name="quality_settings") @@ -205,6 +218,10 @@ class QualitySettings(models.Model): low_overlap_threshold = models.FloatField() + point_size_base = models.CharField( + max_length=32, choices=PointSizeBase.choices(), 
default=PointSizeBase.GROUP_BBOX_SIZE + ) + compare_line_orientation = models.BooleanField() line_orientation_threshold = models.FloatField() @@ -218,6 +235,8 @@ class QualitySettings(models.Model): compare_attributes = models.BooleanField() + match_empty_frames = models.BooleanField(default=False) + target_metric = models.CharField( max_length=32, choices=QualityTargetMetricType.choices(), diff --git a/cvat/apps/quality_control/quality_reports.py b/cvat/apps/quality_control/quality_reports.py index 437cdb72615f..25b5c962dc26 100644 --- a/cvat/apps/quality_control/quality_reports.py +++ b/cvat/apps/quality_control/quality_reports.py @@ -7,10 +7,10 @@ import itertools import math from collections import Counter +from collections.abc import Hashable, Sequence from copy import deepcopy -from datetime import timedelta from functools import cached_property, partial -from typing import Any, Callable, Dict, Hashable, List, Optional, Sequence, Tuple, Union, cast +from typing import Any, Callable, Optional, Union, cast import datumaro as dm import datumaro.util.mask_tools @@ -21,7 +21,6 @@ from datumaro.util import dump_json, parse_json from django.conf import settings from django.db import transaction -from django.utils import timezone from django_rq.queues import DjangoRQ as RqQueue from rest_framework.request import Request from rq.job import Job as RqJob @@ -60,7 +59,6 @@ AnnotationConflictType, AnnotationType, ) -from cvat.utils.background_jobs import schedule_job_with_throttling class _Serializable: @@ -77,7 +75,7 @@ def _value_serializer(self, v): def to_dict(self) -> dict: return self._value_serializer(self._fields_dict()) - def _fields_dict(self, *, include_properties: Optional[List[str]] = None) -> dict: + def _fields_dict(self, *, include_properties: Optional[list[str]] = None) -> dict: d = asdict(self, recurse=False) for field_name in include_properties or []: @@ -117,7 +115,7 @@ def from_dict(cls, d: dict): class AnnotationConflict(_Serializable): frame_id: int type: AnnotationConflictType - annotation_ids: List[AnnotationId] + annotation_ids: list[AnnotationId] @property def severity(self) -> AnnotationConflictSeverity: @@ -146,7 +144,7 @@ def _value_serializer(self, v): else: return super()._value_serializer(v) - def _fields_dict(self, *, include_properties: Optional[List[str]] = None) -> dict: + def _fields_dict(self, *, include_properties: Optional[list[str]] = None) -> dict: return super()._fields_dict(include_properties=include_properties or ["severity"]) @classmethod @@ -160,7 +158,7 @@ def from_dict(cls, d: dict): @define(kw_only=True) class ComparisonParameters(_Serializable): - included_annotation_types: List[dm.AnnotationType] = [ + included_annotation_types: list[dm.AnnotationType] = [ dm.AnnotationType.bbox, dm.AnnotationType.points, dm.AnnotationType.mask, @@ -176,7 +174,7 @@ class ComparisonParameters(_Serializable): compare_attributes: bool = True "Enables or disables attribute checks" - ignored_attributes: List[str] = [] + ignored_attributes: list[str] = [] iou_threshold: float = 0.4 "Used for distinction between matched / unmatched shapes" @@ -187,6 +185,9 @@ class ComparisonParameters(_Serializable): oks_sigma: float = 0.09 "Like IoU threshold, but for points, % of the bbox area to match a pair of points" + point_size_base: models.PointSizeBase = models.PointSizeBase.GROUP_BBOX_SIZE + "Determines how to obtain the object size for point comparisons" + line_thickness: float = 0.01 "Thickness of polylines, relatively to the (image area) ^ 0.5" @@ -214,6 +215,13 @@ class 
ComparisonParameters(_Serializable): panoptic_comparison: bool = True "Use only the visible part of the masks and polygons in comparisons" + match_empty_frames: bool = False + """ + Consider unannotated (empty) frames as matching. If disabled, quality metrics, such as accuracy, + will be 0 if both GT and DS frames have no annotations. When enabled, they will be 1 instead. + This will also add virtual annotations to empty frames in the comparison results. + """ + def _value_serializer(self, v): if isinstance(v, dm.AnnotationType): return str(v.name) @@ -228,12 +236,12 @@ def from_dict(cls, d: dict): @define(kw_only=True) class ConfusionMatrix(_Serializable): - labels: List[str] - rows: np.array - precision: np.array - recall: np.array - accuracy: np.array - jaccard_index: Optional[np.array] + labels: list[str] + rows: np.ndarray + precision: np.ndarray + recall: np.ndarray + accuracy: np.ndarray + jaccard_index: Optional[np.ndarray] @property def axes(self): @@ -245,7 +253,7 @@ def _value_serializer(self, v): else: return super()._value_serializer(v) - def _fields_dict(self, *, include_properties: Optional[List[str]] = None) -> dict: + def _fields_dict(self, *, include_properties: Optional[list[str]] = None) -> dict: return super()._fields_dict(include_properties=include_properties or ["axes"]) @classmethod @@ -295,7 +303,7 @@ def accumulate(self, other: ComparisonReportAnnotationsSummary): ]: setattr(self, field, getattr(self, field) + getattr(other, field)) - def _fields_dict(self, *, include_properties: Optional[List[str]] = None) -> dict: + def _fields_dict(self, *, include_properties: Optional[list[str]] = None) -> dict: return super()._fields_dict( include_properties=include_properties or ["accuracy", "precision", "recall"] ) @@ -338,7 +346,7 @@ def accumulate(self, other: ComparisonReportAnnotationShapeSummary): ]: setattr(self, field, getattr(self, field) + getattr(other, field)) - def _fields_dict(self, *, include_properties: Optional[List[str]] = None) -> dict: + def _fields_dict(self, *, include_properties: Optional[list[str]] = None) -> dict: return super()._fields_dict(include_properties=include_properties or ["accuracy"]) @classmethod @@ -368,7 +376,7 @@ def accumulate(self, other: ComparisonReportAnnotationLabelSummary): for field in ["valid_count", "total_count", "invalid_count"]: setattr(self, field, getattr(self, field) + getattr(other, field)) - def _fields_dict(self, *, include_properties: Optional[List[str]] = None) -> dict: + def _fields_dict(self, *, include_properties: Optional[list[str]] = None) -> dict: return super()._fields_dict(include_properties=include_properties or ["accuracy"]) @classmethod @@ -400,7 +408,7 @@ def from_dict(cls, d: dict): @define(kw_only=True) class ComparisonReportComparisonSummary(_Serializable): frame_share: float - frames: List[str] + frames: list[str] @property def mean_conflict_count(self) -> float: @@ -409,7 +417,7 @@ def mean_conflict_count(self) -> float: conflict_count: int warning_count: int error_count: int - conflicts_by_type: Dict[AnnotationConflictType, int] + conflicts_by_type: dict[AnnotationConflictType, int] annotations: ComparisonReportAnnotationsSummary annotation_components: ComparisonReportAnnotationComponentsSummary @@ -424,7 +432,7 @@ def _value_serializer(self, v): else: return super()._value_serializer(v) - def _fields_dict(self, *, include_properties: Optional[List[str]] = None) -> dict: + def _fields_dict(self, *, include_properties: Optional[list[str]] = None) -> dict: return super()._fields_dict( 
include_properties=include_properties or [ @@ -456,7 +464,7 @@ def from_dict(cls, d: dict): @define(kw_only=True, init=False) class ComparisonReportFrameSummary(_Serializable): - conflicts: List[AnnotationConflict] + conflicts: list[AnnotationConflict] @cached_property def conflict_count(self) -> int: @@ -471,7 +479,7 @@ def error_count(self) -> int: return len([c for c in self.conflicts if c.severity == AnnotationConflictSeverity.ERROR]) @cached_property - def conflicts_by_type(self) -> Dict[AnnotationConflictType, int]: + def conflicts_by_type(self) -> dict[AnnotationConflictType, int]: return Counter(c.type for c in self.conflicts) annotations: ComparisonReportAnnotationsSummary @@ -493,7 +501,7 @@ def __init__(self, *args, **kwargs): self.__attrs_init__(*args, **kwargs) - def _fields_dict(self, *, include_properties: Optional[List[str]] = None) -> dict: + def _fields_dict(self, *, include_properties: Optional[list[str]] = None) -> dict: return super()._fields_dict(include_properties=include_properties or self._CACHED_FIELDS) @classmethod @@ -524,14 +532,14 @@ def from_dict(cls, d: dict): class ComparisonReport(_Serializable): parameters: ComparisonParameters comparison_summary: ComparisonReportComparisonSummary - frame_results: Dict[int, ComparisonReportFrameSummary] + frame_results: dict[int, ComparisonReportFrameSummary] @property - def conflicts(self) -> List[AnnotationConflict]: + def conflicts(self) -> list[AnnotationConflict]: return list(itertools.chain.from_iterable(r.conflicts for r in self.frame_results.values())) @classmethod - def from_dict(cls, d: Dict[str, Any]) -> ComparisonReport: + def from_dict(cls, d: dict[str, Any]) -> ComparisonReport: return cls( parameters=ComparisonParameters.from_dict(d["parameters"]), comparison_summary=ComparisonReportComparisonSummary.from_dict(d["comparison_summary"]), @@ -622,7 +630,7 @@ def get_source_ann( def clear(self): self._annotation_mapping.clear() - def __call__(self, *args, **kwargs) -> List[dm.Annotation]: + def __call__(self, *args, **kwargs) -> list[dm.Annotation]: converter = _MemoizingAnnotationConverter(*args, factory=self, **kwargs) return converter.convert() @@ -851,7 +859,7 @@ def _compare_lines(self, a: np.ndarray, b: np.ndarray) -> float: return sum(np.exp(-(dists**2) / (2 * scale * (2 * self.torso_r) ** 2))) / len(a) @classmethod - def approximate_points(cls, a: np.ndarray, b: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + def approximate_points(cls, a: np.ndarray, b: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """ Creates 2 polylines with the same numbers of points, the points are placed on the original lines with the same step. 
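The hunks below thread a new point_size_base setting into the comparator; it chooses the object "size" used by the OKS-style point metric. A simplified sketch (hypothetical signature) of the two scale choices: the whole image area, or the mean bounding-box area of the two point groups being compared.

import numpy as np

def point_similarity(a, b, *, sigma, img_area, bboxes, use_image_size=False):
    if use_image_size:
        scale = img_area  # PointSizeBase.IMAGE_SIZE
    else:
        # PointSizeBase.GROUP_BBOX_SIZE: mean area of the two group bboxes
        (_, _, w0, h0), (_, _, w1, h1) = bboxes
        scale = ((w0 + w1) / 2) * ((h0 + h1) / 2)

    a = np.reshape(a, (-1, 2))
    b = np.reshape(b, (-1, 2))
    dists = np.linalg.norm(a - b, axis=1)
    return float(np.mean(np.exp(-(dists**2) / (2 * scale * sigma**2))))

# The same 1px offsets score higher against a full-HD image area than against
# a small group bbox, which is why the setting matters for sparse point groups.
print(point_similarity([0, 0, 10, 10], [1, 1, 11, 11], sigma=0.09,
                       img_area=1920 * 1080,
                       bboxes=((0, 0, 10, 10), (1, 1, 10, 10))))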
@@ -949,12 +957,13 @@ def __init__( self, categories: dm.CategoriesInfo, *, - included_ann_types: Optional[List[dm.AnnotationType]] = None, + included_ann_types: Optional[list[dm.AnnotationType]] = None, return_distances: bool = False, iou_threshold: float = 0.5, # https://cocodataset.org/#keypoints-eval # https://github.com/cocodataset/cocoapi/blob/8c9bcc3cf640524c4c20a9c40e89cb6a2f2fa0e9/PythonAPI/pycocotools/cocoeval.py#L523 oks_sigma: float = 0.09, + point_size_base: models.PointSizeBase = models.PointSizeBase.GROUP_BBOX_SIZE, compare_line_orientation: bool = False, line_torso_radius: float = 0.01, panoptic_comparison: bool = False, @@ -968,6 +977,9 @@ def __init__( self.oks_sigma = oks_sigma "% of the shape area" + self.point_size_base = point_size_base + "Compare point groups using the group bbox size or the image size" + self.compare_line_orientation = compare_line_orientation "Whether lines are oriented or not" @@ -980,7 +992,7 @@ def __init__( def _instance_bbox( self, instance_anns: Sequence[dm.Annotation] - ) -> Tuple[float, float, float, float]: + ) -> tuple[float, float, float, float]: return dm.ops.max_bbox( a.get_bbox() if isinstance(a, dm.Skeleton) else a for a in instance_anns @@ -1127,7 +1139,7 @@ def _find_instances(annotations): return instances, instance_map def _get_compiled_mask( - anns: Sequence[dm.Annotation], *, instance_ids: Dict[int, int] + anns: Sequence[dm.Annotation], *, instance_ids: dict[int, int] ) -> dm.CompiledMask: if not anns: return None @@ -1293,13 +1305,20 @@ def _distance(a: dm.Points, b: dm.Points) -> float: else: # Complex case: multiple points, grouped points, points with a bbox # Try to align points and then return the metric - # match them in their bbox space - if dm.ops.bbox_iou(a_bbox, b_bbox) <= 0: - return 0 + if self.point_size_base == models.PointSizeBase.IMAGE_SIZE: + scale = img_h * img_w + elif self.point_size_base == models.PointSizeBase.GROUP_BBOX_SIZE: + # match points in their bbox space + + if dm.ops.bbox_iou(a_bbox, b_bbox) <= 0: + # this early exit may not work for points forming an axis-aligned line + return 0 - bbox = dm.ops.mean_bbox([a_bbox, b_bbox]) - scale = bbox[2] * bbox[3] + bbox = dm.ops.mean_bbox([a_bbox, b_bbox]) + scale = bbox[2] * bbox[3] + else: + assert False, f"Unknown point size base {self.point_size_base}" a_points = np.reshape(a.points, (-1, 2)) b_points = np.reshape(b.points, (-1, 2)) @@ -1525,6 +1544,7 @@ def __init__(self, categories: dm.CategoriesInfo, *, settings: ComparisonParamet panoptic_comparison=settings.panoptic_comparison, iou_threshold=settings.iou_threshold, oks_sigma=settings.oks_sigma, + point_size_base=settings.point_size_base, line_torso_radius=settings.line_thickness, compare_line_orientation=False, # should not be taken from outside, handled differently ) @@ -1561,7 +1581,7 @@ def match_attrs(self, ann_a: dm.Annotation, ann_b: dm.Annotation): def find_groups( self, item: dm.DatasetItem - ) -> Tuple[Dict[int, List[dm.Annotation]], Dict[int, int]]: + ) -> tuple[dict[int, list[dm.Annotation]], dict[int, int]]: ann_groups = dm.ops.find_instances( [ ann @@ -1610,7 +1630,7 @@ def _group_distance(gt_group_id, ds_group_id): return ds_to_gt_groups - def find_covered(self, item: dm.DatasetItem) -> List[dm.Annotation]: + def find_covered(self, item: dm.DatasetItem) -> list[dm.Annotation]: # Get annotations that can cover or be covered spatial_types = { dm.AnnotationType.polygon, @@ -1685,7 +1705,7 @@ def __init__( self._ds_dataset = self._ds_data_provider.dm_dataset self._gt_dataset = 
self._gt_data_provider.dm_dataset - self._frame_results: Dict[int, ComparisonReportFrameSummary] = {} + self._frame_results: dict[int, ComparisonReportFrameSummary] = {} self.comparator = _Comparator(self._gt_dataset.categories(), settings=settings) @@ -1722,7 +1742,7 @@ def _find_gt_conflicts(self): def _process_frame( self, ds_item: dm.DatasetItem, gt_item: dm.DatasetItem - ) -> List[AnnotationConflict]: + ) -> list[AnnotationConflict]: frame_id = self._dm_item_to_frame_id(ds_item, self._ds_dataset) frame_results = self.comparator.match_annotations(gt_item, ds_item) @@ -1734,7 +1754,7 @@ def _process_frame( def _generate_frame_annotation_conflicts( self, frame_id: str, frame_results, *, gt_item: dm.DatasetItem, ds_item: dm.DatasetItem - ) -> List[AnnotationConflict]: + ) -> list[AnnotationConflict]: conflicts = [] matches, mismatches, gt_unmatched, ds_unmatched, _ = frame_results["all_ann_types"] @@ -1957,8 +1977,18 @@ def _find_closest_unmatched_shape(shape: dm.Annotation): gt_label_idx = label_id_map[gt_ann.label] if gt_ann else self._UNMATCHED_IDX confusion_matrix[ds_label_idx, gt_label_idx] += 1 + if self.settings.match_empty_frames and not gt_item.annotations and not ds_item.annotations: + # Add virtual annotations for empty frames + valid_labels_count = 1 + total_labels_count = 1 + + valid_shapes_count = 1 + total_shapes_count = 1 + ds_shapes_count = 1 + gt_shapes_count = 1 + self._frame_results[frame_id] = ComparisonReportFrameSummary( - annotations=self._generate_annotations_summary( + annotations=self._generate_frame_annotations_summary( confusion_matrix, confusion_matrix_labels ), annotation_components=ComparisonReportAnnotationComponentsSummary( @@ -1985,7 +2015,7 @@ def _find_closest_unmatched_shape(shape: dm.Annotation): # row/column index in the confusion matrix corresponding to unmatched annotations _UNMATCHED_IDX = -1 - def _make_zero_confusion_matrix(self) -> Tuple[List[str], np.ndarray, Dict[int, int]]: + def _make_zero_confusion_matrix(self) -> tuple[list[str], np.ndarray, dict[int, int]]: label_id_idx_map = {} label_names = [] for label_id, label in enumerate(self._gt_dataset.categories()[dm.AnnotationType.label]): @@ -2000,9 +2030,8 @@ def _make_zero_confusion_matrix(self) -> Tuple[List[str], np.ndarray, Dict[int, return label_names, confusion_matrix, label_id_idx_map - @classmethod - def _generate_annotations_summary( - cls, confusion_matrix: np.ndarray, confusion_matrix_labels: List[str] + def _compute_annotations_summary( + self, confusion_matrix: np.ndarray, confusion_matrix_labels: list[str] ) -> ComparisonReportAnnotationsSummary: matched_ann_counts = np.diag(confusion_matrix) ds_ann_counts = np.sum(confusion_matrix, axis=1) @@ -2022,10 +2051,10 @@ def _generate_annotations_summary( ) / (total_annotations_count or 1) valid_annotations_count = np.sum(matched_ann_counts) - missing_annotations_count = np.sum(confusion_matrix[cls._UNMATCHED_IDX, :]) - extra_annotations_count = np.sum(confusion_matrix[:, cls._UNMATCHED_IDX]) - ds_annotations_count = np.sum(ds_ann_counts[: cls._UNMATCHED_IDX]) - gt_annotations_count = np.sum(gt_ann_counts[: cls._UNMATCHED_IDX]) + missing_annotations_count = np.sum(confusion_matrix[self._UNMATCHED_IDX, :]) + extra_annotations_count = np.sum(confusion_matrix[:, self._UNMATCHED_IDX]) + ds_annotations_count = np.sum(ds_ann_counts[: self._UNMATCHED_IDX]) + gt_annotations_count = np.sum(gt_ann_counts[: self._UNMATCHED_IDX]) return ComparisonReportAnnotationsSummary( valid_count=valid_annotations_count, @@ -2044,12 +2073,24 @@ def 
_generate_annotations_summary( ), ) - def generate_report(self) -> ComparisonReport: - self._find_gt_conflicts() + def _generate_frame_annotations_summary( + self, confusion_matrix: np.ndarray, confusion_matrix_labels: list[str] + ) -> ComparisonReportAnnotationsSummary: + summary = self._compute_annotations_summary(confusion_matrix, confusion_matrix_labels) + + if self.settings.match_empty_frames and summary.total_count == 0: + # Add virtual annotations for empty frames + summary.valid_count = 1 + summary.total_count = 1 + summary.ds_count = 1 + summary.gt_count = 1 + + return summary + def _generate_dataset_annotations_summary( + self, frame_summaries: dict[int, ComparisonReportFrameSummary] + ) -> tuple[ComparisonReportAnnotationsSummary, ComparisonReportAnnotationComponentsSummary]: # accumulate stats - intersection_frames = [] - conflicts = [] annotation_components = ComparisonReportAnnotationComponentsSummary( shape=ComparisonReportAnnotationShapeSummary( valid_count=0, @@ -2067,19 +2108,52 @@ def generate_report(self) -> ComparisonReport: ), ) mean_ious = [] + empty_frame_count = 0 confusion_matrix_labels, confusion_matrix, _ = self._make_zero_confusion_matrix() - for frame_id, frame_result in self._frame_results.items(): - intersection_frames.append(frame_id) - conflicts += frame_result.conflicts + for frame_result in frame_summaries.values(): confusion_matrix += frame_result.annotations.confusion_matrix.rows + if not np.any(frame_result.annotations.confusion_matrix.rows): + empty_frame_count += 1 + if annotation_components is None: annotation_components = deepcopy(frame_result.annotation_components) else: annotation_components.accumulate(frame_result.annotation_components) + mean_ious.append(frame_result.annotation_components.shape.mean_iou) + annotation_summary = self._compute_annotations_summary( + confusion_matrix, confusion_matrix_labels + ) + + if self.settings.match_empty_frames and empty_frame_count: + # Add virtual annotations for empty frames, + # they are not included in the confusion matrix + annotation_summary.valid_count += empty_frame_count + annotation_summary.total_count += empty_frame_count + annotation_summary.ds_count += empty_frame_count + annotation_summary.gt_count += empty_frame_count + + # Cannot be computed in accumulate() + annotation_components.shape.mean_iou = np.mean(mean_ious) + + return annotation_summary, annotation_components + + def generate_report(self) -> ComparisonReport: + self._find_gt_conflicts() + + intersection_frames = [] + conflicts = [] + for frame_id, frame_result in self._frame_results.items(): + intersection_frames.append(frame_id) + conflicts += frame_result.conflicts + + annotation_summary, annotations_component_summary = ( + self._generate_dataset_annotations_summary(self._frame_results) + ) + return ComparisonReport( parameters=self.settings, comparison_summary=ComparisonReportComparisonSummary( @@ -2095,49 +2169,24 @@ def generate_report(self) -> ComparisonReport: [c for c in conflicts if c.severity == AnnotationConflictSeverity.ERROR] ), conflicts_by_type=Counter(c.type for c in conflicts), - annotations=self._generate_annotations_summary( - confusion_matrix, confusion_matrix_labels - ), - annotation_components=ComparisonReportAnnotationComponentsSummary( - shape=ComparisonReportAnnotationShapeSummary( - valid_count=annotation_components.shape.valid_count, - missing_count=annotation_components.shape.missing_count, - extra_count=annotation_components.shape.extra_count, - total_count=annotation_components.shape.total_count, - 
ds_count=annotation_components.shape.ds_count, - gt_count=annotation_components.shape.gt_count, - mean_iou=np.mean(mean_ious), - ), - label=ComparisonReportAnnotationLabelSummary( - valid_count=annotation_components.label.valid_count, - invalid_count=annotation_components.label.invalid_count, - total_count=annotation_components.label.total_count, - ), - ), + annotations=annotation_summary, + annotation_components=annotations_component_summary, ), frame_results=self._frame_results, ) class QualityReportUpdateManager: - _QUEUE_AUTOUPDATE_JOB_PREFIX = "update-quality-metrics-" _QUEUE_CUSTOM_JOB_PREFIX = "quality-check-" _RQ_CUSTOM_QUALITY_CHECK_JOB_TYPE = "custom_quality_check" _JOB_RESULT_TTL = 120 - @classmethod - def _get_quality_check_job_delay(cls) -> timedelta: - return timedelta(seconds=settings.QUALITY_CHECK_JOB_DELAY) - def _get_scheduler(self) -> RqScheduler: return django_rq.get_scheduler(settings.CVAT_QUEUES.QUALITY_REPORTS.value) def _get_queue(self) -> RqQueue: return django_rq.get_queue(settings.CVAT_QUEUES.QUALITY_REPORTS.value) - def _make_queue_job_id_base(self, task: Task) -> str: - return f"{self._QUEUE_AUTOUPDATE_JOB_PREFIX}task-{task.id}" - def _make_custom_quality_check_job_id(self, task_id: int, user_id: int) -> str: # FUTURE-TODO: it looks like job ID template should not include user_id because: # 1. There is no need to compute quality reports several times for different users @@ -2145,13 +2194,6 @@ def _make_custom_quality_check_job_id(self, task_id: int, user_id: int) -> str: # be able to check the status of the computation process return f"{self._QUEUE_CUSTOM_JOB_PREFIX}task-{task_id}-user-{user_id}" - @classmethod - def _get_last_report_time(cls, task: Task) -> Optional[timezone.datetime]: - report = models.QualityReport.objects.filter(task=task).order_by("-created_date").first() - if report: - return report.created_date - return None - class QualityReportsNotAvailable(Exception): pass @@ -2169,33 +2211,6 @@ def _check_quality_reporting_available(self, task: Task): f"and in the {StatusChoice.COMPLETED} state" ) - def _should_update(self, task: Task) -> bool: - try: - self._check_quality_reporting_available(task) - return True - except self.QualityReportsNotAvailable: - return False - - def schedule_quality_autoupdate_job(self, task: Task): - """ - This function schedules a quality report autoupdate job - """ - - if not self._should_update(task): - return - - now = timezone.now() - delay = self._get_quality_check_job_delay() - next_job_time = now.utcnow() + delay - - schedule_job_with_throttling( - settings.CVAT_QUEUES.QUALITY_REPORTS.value, - self._make_queue_job_id_base(task), - next_job_time, - self._check_task_quality, - task_id=task.id, - ) - class JobAlreadyExists(QualityReportsNotAvailable): def __str__(self): return "Quality computation job for this task already enqueued" @@ -2313,7 +2328,7 @@ def _compute_reports(self, task_id: int) -> int: in active_validation_frames ) - jobs: List[Job] = [j for j in job_queryset if j.type == JobType.ANNOTATION] + jobs: list[Job] = [j for j in job_queryset if j.type == JobType.ANNOTATION] job_data_providers = { job.id: JobDataProvider( job.id, @@ -2325,7 +2340,7 @@ def _compute_reports(self, task_id: int) -> int: quality_params = self._get_task_quality_params(task) - job_comparison_reports: Dict[int, ComparisonReport] = {} + job_comparison_reports: dict[int, ComparisonReport] = {} for job in jobs: job_data_provider = job_data_providers[job.id] comparator = DatasetComparator( @@ -2345,15 +2360,6 @@ def 
_compute_reports(self, task_id: int) -> int: except Task.DoesNotExist: return - last_report_time = self._get_last_report_time(task) - if not self.is_custom_quality_check_job(self._get_current_job()) and ( - last_report_time - and timezone.now() < last_report_time + self._get_quality_check_job_delay() - ): - # Discard this report as it has probably been computed in parallel - # with another one - return - job_quality_reports = {} for job in jobs: job_comparison_report = job_comparison_reports[job.id] @@ -2390,14 +2396,14 @@ def _get_current_job(self): return get_current_job() def _compute_task_report( - self, task: Task, job_reports: Dict[int, ComparisonReport] + self, task: Task, job_reports: dict[int, ComparisonReport] ) -> ComparisonReport: # The task dataset can be different from any jobs' dataset because of frame overlaps # between jobs, from which annotations are merged to get the task annotations. # Thus, a separate report could be computed for the task. Instead, here we only # compute the combined summary of the job reports. task_intersection_frames = set() - task_conflicts: List[AnnotationConflict] = [] + task_conflicts: list[AnnotationConflict] = [] task_annotations_summary = None task_ann_components_summary = None task_mean_shape_ious = [] @@ -2474,7 +2480,7 @@ def _compute_task_report( return task_report_data - def _save_reports(self, *, task_report: Dict, job_reports: List[Dict]) -> models.QualityReport: + def _save_reports(self, *, task_report: dict, job_reports: list[dict]) -> models.QualityReport: # TODO: add validation (e.g. ann id count for different types of conflicts) db_task_report = models.QualityReport( diff --git a/cvat/apps/quality_control/rules/conflicts.rego b/cvat/apps/quality_control/rules/conflicts.rego index f8e570b58826..883491128209 100644 --- a/cvat/apps/quality_control/rules/conflicts.rego +++ b/cvat/apps/quality_control/rules/conflicts.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/quality_control/rules/quality_reports.rego b/cvat/apps/quality_control/rules/quality_reports.rego index e9dd28b3ec32..98626a5f0ca3 100644 --- a/cvat/apps/quality_control/rules/quality_reports.rego +++ b/cvat/apps/quality_control/rules/quality_reports.rego @@ -11,7 +11,7 @@ import data.quality_utils # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/quality_control/rules/quality_settings.rego b/cvat/apps/quality_control/rules/quality_settings.rego index 1fc587159ee7..0b2f6b149e79 100644 --- a/cvat/apps/quality_control/rules/quality_settings.rego +++ b/cvat/apps/quality_control/rules/quality_settings.rego @@ -10,7 +10,7 @@ import data.organizations # "auth": { # "user": { # "id": , -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # }, # "organization": { # "id": , diff --git a/cvat/apps/quality_control/serializers.py b/cvat/apps/quality_control/serializers.py index fe6b372d9cb4..6164abc12200 100644 --- a/cvat/apps/quality_control/serializers.py +++ b/cvat/apps/quality_control/serializers.py @@ -81,6 +81,7 @@ class Meta: "max_validations_per_job", "iou_threshold", "oks_sigma", + "point_size_base", "line_thickness", "low_overlap_threshold", "compare_line_orientation", @@ 
-91,6 +92,7 @@ class Meta: "object_visibility_threshold", "panoptic_comparison", "compare_attributes", + "match_empty_frames", ) read_only_fields = ( "id", @@ -98,6 +100,7 @@ class Meta: ) extra_kwargs = {k: {"required": False} for k in fields} + extra_kwargs.setdefault("match_empty_frames", {}).setdefault("default", False) for field_name, help_text in { "target_metric": "The primary metric used for quality estimation", @@ -115,10 +118,26 @@ class Meta: "oks_sigma": """ Like IoU threshold, but for points. - The percent of the bbox area, used as the radius of the circle around the GT point, - where the checked point is expected to be. + The percent of the bbox side, used as the radius of the circle around the GT point, + where the checked point is expected to be. For boxes with different width and + height, the "side" is computed as a geometric mean of the width and height. Read more: https://cocodataset.org/#keypoints-eval """, + "point_size_base": """ + When comparing point annotations (including both separate points and point groups), + the OKS sigma parameter defines the matching area for each GT point based on the + object size. The point size base parameter allows configuring how the object size + is determined. + If {image_size}, the image size is used. Useful if each point + annotation represents a separate object, or if boxes grouped with points do not + represent object boundaries. + If {group_bbox_size}, the object size is based on + the point group bbox size. Useful if each point group represents an object, + or if there is a bbox grouped with points, representing the object size. + """.format( image_size=models.PointSizeBase.IMAGE_SIZE, group_bbox_size=models.PointSizeBase.GROUP_BBOX_SIZE, ), "line_thickness": """ Thickness of polylines, relatively to the (image area) ^ 0.5. The distance to the boundary around the GT line, @@ -147,6 +166,12 @@ class Meta: Use only the visible part of the masks and polygons in comparisons """, "compare_attributes": "Enables or disables annotation attribute comparison", + "match_empty_frames": """ + Count empty frames as matching. This affects target metrics like accuracy in cases + where there are no annotations. If disabled, frames without annotations + are counted as not matching (accuracy is 0). If enabled, accuracy will be 1 instead. + This will also add virtual annotations to empty frames in the comparison results.
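# --- Editor's illustration, not part of the patch: a worked example of how
# the oks_sigma and point_size_base settings described above interact, using
# a COCO-style OKS formula; this helper is hypothetical and is not the
# comparator's actual API.
import numpy as np

def oks(gt_xy, pred_xy, object_area: float, sigma: float = 0.09) -> float:
    # 1.0 for a perfect match, decaying with the squared distance,
    # normalized by the chosen object area and sigma
    d2 = float(np.sum((np.asarray(gt_xy) - np.asarray(pred_xy)) ** 2))
    return float(np.exp(-d2 / (2 * object_area * sigma**2)))

gt, pred = (100.0, 100.0), (104.0, 103.0)  # 5 px apart
oks(gt, pred, object_area=50 * 40)      # group_bbox_size scale: ~0.46, strict
oks(gt, pred, object_area=1920 * 1080)  # image_size scale: ~1.0, lenient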
+ """, }.items(): extra_kwargs.setdefault(field_name, {}).setdefault( "help_text", textwrap.dedent(help_text.lstrip("\n")) diff --git a/cvat/apps/quality_control/signals.py b/cvat/apps/quality_control/signals.py index 7371608c3ce9..b56d70e99109 100644 --- a/cvat/apps/quality_control/signals.py +++ b/cvat/apps/quality_control/signals.py @@ -1,54 +1,14 @@ -# Copyright (C) 2023 CVAT.ai Corporation +# Copyright (C) 2023-2024 CVAT.ai Corporation # # SPDX-License-Identifier: MIT -from django.db import transaction from django.db.models.signals import post_save from django.dispatch import receiver -from cvat.apps.engine.models import Annotation, Job, Project, Task -from cvat.apps.quality_control import quality_reports as qc +from cvat.apps.engine.models import Job, Task from cvat.apps.quality_control.models import QualitySettings -@receiver(post_save, sender=Job, dispatch_uid=__name__ + ".save_job-update_quality_metrics") -@receiver(post_save, sender=Task, dispatch_uid=__name__ + ".save_task-update_quality_metrics") -@receiver(post_save, sender=Project, dispatch_uid=__name__ + ".save_project-update_quality_metrics") -@receiver( - post_save, sender=Annotation, dispatch_uid=__name__ + ".save_annotation-update_quality_metrics" -) -@receiver( - post_save, - sender=QualitySettings, - dispatch_uid=__name__ + ".save_settings-update_quality_metrics", -) -def __save_job__update_quality_metrics(instance, created, **kwargs): - tasks = [] - - if isinstance(instance, Project): - tasks += list(instance.tasks.all()) - elif isinstance(instance, Task): - tasks.append(instance) - elif isinstance(instance, Job): - tasks.append(instance.segment.task) - elif isinstance(instance, Annotation): - tasks.append(instance.job.segment.task) - elif isinstance(instance, QualitySettings): - tasks.append(instance.task) - else: - assert False - - def schedule_autoupdate_jobs(): - for task in tasks: - if task.id is None: - # The task may have been deleted after the on_commit call. 
- continue - - qc.QualityReportUpdateManager().schedule_quality_autoupdate_job(task) - - transaction.on_commit(schedule_autoupdate_jobs, robust=True) - - @receiver(post_save, sender=Task, dispatch_uid=__name__ + ".save_task-initialize_quality_settings") @receiver(post_save, sender=Job, dispatch_uid=__name__ + ".save_job-initialize_quality_settings") def __save_task__initialize_quality_settings(instance, created, **kwargs): diff --git a/cvat/apps/webhooks/rules/tests/generators/webhooks_test.gen.rego.py b/cvat/apps/webhooks/rules/tests/generators/webhooks_test.gen.rego.py index c367a42cc98b..66417f3d096d 100644 --- a/cvat/apps/webhooks/rules/tests/generators/webhooks_test.gen.rego.py +++ b/cvat/apps/webhooks/rules/tests/generators/webhooks_test.gen.rego.py @@ -40,7 +40,7 @@ def read_rules(name): SCOPES = list({rule["scope"] for rule in simple_rules}) CONTEXTS = ["sandbox", "organization"] OWNERSHIPS = ["project:owner", "owner", "none"] -GROUPS = ["admin", "business", "user", "worker", "none"] +GROUPS = ["admin", "user", "worker", "none"] ORG_ROLES = ["owner", "maintainer", "supervisor", "worker", None] SAME_ORG = [True, False] diff --git a/cvat/apps/webhooks/rules/webhooks.rego b/cvat/apps/webhooks/rules/webhooks.rego index a74a88c6a965..85d577a21ee7 100644 --- a/cvat/apps/webhooks/rules/webhooks.rego +++ b/cvat/apps/webhooks/rules/webhooks.rego @@ -11,7 +11,7 @@ import data.organizations # "auth": { # "user": { # "id": -# "privilege": <"admin"|"business"|"user"|"worker"> or null +# "privilege": <"admin"|"user"|"worker"> or null # } # "organization": { # "id": , diff --git a/cvat/asgi.py b/cvat/asgi.py index 44ddd0d87131..2fbe40a8d4c6 100644 --- a/cvat/asgi.py +++ b/cvat/asgi.py @@ -24,6 +24,7 @@ if debug.is_debugging_enabled(): + class DebuggerApp(ASGIHandler): """ Support for VS code debugger diff --git a/cvat/nginx.conf b/cvat/nginx.conf index 392c49d61a30..9cf14332abed 100644 --- a/cvat/nginx.conf +++ b/cvat/nginx.conf @@ -41,14 +41,27 @@ http { # CVAT Settings ## + # Only add security headers if the upstream server does not already provide them. 
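+ # Editor's note on the mechanism, for review: a map with only an '' entry
+ # and no "default" resolves to an empty string whenever the upstream
+ # response already carries the header, and nginx's add_header skips header
+ # values that are empty, so an upstream-provided header passes through
+ # unchanged while the fallback value is added otherwise.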
+ map $upstream_http_referrer_policy $hdr_referrer_policy { + '' "strict-origin-when-cross-origin"; + } + + map $upstream_http_x_content_type_options $hdr_x_content_type_options { + '' "nosniff"; + } + + map $upstream_http_x_frame_options $hdr_x_frame_options { + '' "deny"; + } + server { listen 8080; # previously used value client_max_body_size 1G; - add_header X-Frame-Options deny; - add_header Referrer-Policy "strict-origin-when-cross-origin" always; - add_header X-Content-Type-Options "nosniff" always; + add_header Referrer-Policy $hdr_referrer_policy always; + add_header X-Content-Type-Options $hdr_x_content_type_options always; + add_header X-Frame-Options $hdr_x_frame_options always; server_name _; diff --git a/cvat/requirements/all.txt b/cvat/requirements/all.txt index 4e05dcc9e85f..482db32ecf87 100644 --- a/cvat/requirements/all.txt +++ b/cvat/requirements/all.txt @@ -8,5 +8,3 @@ -r development.txt -r production.txt -r testing.txt - -# The following packages are considered to be unsafe in a requirements file: diff --git a/cvat/requirements/base.in b/cvat/requirements/base.in index edd8c065dbc0..fd86b51f99dc 100644 --- a/cvat/requirements/base.in +++ b/cvat/requirements/base.in @@ -12,7 +12,7 @@ azure-storage-blob==12.13.0 boto3==1.17.61 clickhouse-connect==0.6.8 coreapi==2.3.3 -datumaro @ git+https://github.com/cvat-ai/datumaro.git@e612d1bfb76a3c3d3d545187338c841a246619fb +datumaro @ git+https://github.com/cvat-ai/datumaro.git@bf0374689df50599a34a4f220b9e5329aca695ce dj-pagination==2.5.0 # Despite direct indication allauth in requirements we should keep 'with_social' for dj-rest-auth # to avoid possible further versions conflicts (we use registration functionality) @@ -29,7 +29,7 @@ django-health-check>=3.18.1,<4 django-rq==2.8.1 django-sendfile2==0.7.0 Django~=4.2.7 -djangorestframework~=3.14.0 +djangorestframework>=3.15.2,<4 drf-spectacular==0.26.2 furl==2.1.0 google-cloud-storage==1.42.0 diff --git a/cvat/requirements/base.txt b/cvat/requirements/base.txt index ffaf10bd0e71..fe4518b64e44 100644 --- a/cvat/requirements/base.txt +++ b/cvat/requirements/base.txt @@ -1,4 +1,4 @@ -# SHA1:9ff984f33ae139c68d90acc3e338c6cef7ecf6e9 +# SHA1:1bed6e1afea11473b164df79d7d166f419074359 # # This file is autogenerated by pip-compile-multi # To update, run: @@ -8,14 +8,14 @@ -r ../../utils/dataset_manifest/requirements.txt asgiref==3.8.1 # via django -async-timeout==4.0.3 +async-timeout==5.0.1 # via redis attrs==21.4.0 # via # -r cvat/requirements/base.in # datumaro # jsonschema -azure-core==1.31.0 +azure-core==1.32.0 # via # azure-storage-blob # msrest @@ -29,14 +29,14 @@ botocore==1.20.112 # s3transfer cachetools==5.5.0 # via google-auth -certifi==2024.8.30 +certifi==2024.12.14 # via # clickhouse-connect # msrest # requests cffi==1.17.1 # via cryptography -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via rq @@ -50,25 +50,25 @@ coreschema==0.0.4 # via coreapi crontab==1.0.1 # via rq-scheduler -cryptography==43.0.1 +cryptography==44.0.0 # via # azure-storage-blob # pyjwt cycler==0.12.1 # via matplotlib -datumaro @ git+https://github.com/cvat-ai/datumaro.git@e612d1bfb76a3c3d3d545187338c841a246619fb +datumaro @ git+https://github.com/cvat-ai/datumaro.git@bf0374689df50599a34a4f220b9e5329aca695ce # via -r cvat/requirements/base.in defusedxml==0.7.1 # via # datumaro # python3-openid -deprecated==1.2.14 +deprecated==1.2.15 # via limits dj-pagination==2.5.0 # via -r cvat/requirements/base.in dj-rest-auth[with-social]==5.0.2 # via -r cvat/requirements/base.in 
-django==4.2.16 +django==4.2.17 # via # -r cvat/requirements/base.in # dj-rest-auth @@ -105,7 +105,7 @@ django-rq==2.8.1 # via -r cvat/requirements/base.in django-sendfile2==0.7.0 # via -r cvat/requirements/base.in -djangorestframework==3.14.0 +djangorestframework==3.15.2 # via # -r cvat/requirements/base.in # dj-rest-auth @@ -116,17 +116,17 @@ easyprocess==1.1 # via pyunpack entrypoint2==1.1 # via pyunpack -fonttools==4.54.1 +fonttools==4.55.3 # via matplotlib freezegun==1.5.1 # via rq-scheduler furl==2.1.0 # via -r cvat/requirements/base.in -google-api-core==2.20.0 +google-api-core==2.24.0 # via # google-cloud-core # google-cloud-storage -google-auth==2.35.0 +google-auth==2.37.0 # via # google-api-core # google-cloud-core @@ -139,7 +139,7 @@ google-crc32c==1.6.0 # via google-resumable-media google-resumable-media==2.7.2 # via google-cloud-storage -googleapis-common-protos==1.65.0 +googleapis-common-protos==1.66.0 # via google-api-core h5py==3.12.1 # via datumaro @@ -148,10 +148,10 @@ idna==3.10 importlib-metadata==8.5.0 # via clickhouse-connect importlib-resources==6.4.5 - # via limits + # via nibabel inflection==0.5.1 # via drf-spectacular -isodate==0.6.1 +isodate==0.7.2 # via # msrest # python3-saml @@ -167,7 +167,7 @@ jsonschema==4.17.3 # via drf-spectacular kiwisolver==1.4.7 # via matplotlib -limits==3.13.0 +limits==3.14.1 # via python-logstash-async lxml==5.3.0 # via @@ -177,7 +177,7 @@ lxml==5.3.0 # xmlsec lz4==4.3.3 # via clickhouse-connect -markupsafe==2.1.5 +markupsafe==3.0.2 # via jinja2 matplotlib==3.8.4 # via @@ -187,17 +187,17 @@ mmh3==5.0.1 # via pottery msrest==0.7.1 # via azure-storage-blob -networkx==3.3 +networkx==3.4.2 # via datumaro -nibabel==5.2.1 +nibabel==5.3.2 # via datumaro oauthlib==3.2.2 # via requests-oauthlib orderedmultidict==1.0.1 # via furl -orjson==3.10.7 +orjson==3.10.12 # via datumaro -packaging==24.1 +packaging==24.2 # via # limits # matplotlib @@ -211,9 +211,9 @@ pdf2image==1.14.0 # via -r cvat/requirements/base.in pottery==3.0.0 # via -r cvat/requirements/base.in -proto-plus==1.24.0 +proto-plus==1.25.0 # via google-api-core -protobuf==5.28.2 +protobuf==5.29.1 # via # google-api-core # googleapis-common-protos @@ -236,11 +236,11 @@ pycocotools==2.0.8 # via datumaro pycparser==2.22 # via cffi -pyjwt[crypto]==2.9.0 +pyjwt[crypto]==2.10.1 # via django-allauth pylogbeat==2.0.1 # via python-logstash-async -pyparsing==3.1.4 +pyparsing==3.2.0 # via matplotlib pyrsistent==0.20.0 # via jsonschema @@ -264,7 +264,6 @@ python3-saml==1.16.0 pytz==2024.2 # via # clickhouse-connect - # djangorestframework # pandas pyunpack==0.2.1 # via -r cvat/requirements/base.in @@ -309,7 +308,7 @@ rsa==4.9 # via google-auth ruamel-yaml==0.18.6 # via datumaro -ruamel-yaml-clib==0.2.8 +ruamel-yaml-clib==0.2.12 # via ruamel-yaml rules==3.5 # via -r cvat/requirements/base.in @@ -319,14 +318,13 @@ scipy==1.13.1 # via datumaro shapely==1.7.1 # via -r cvat/requirements/base.in -six==1.16.0 +six==1.17.0 # via # azure-core # furl - # isodate # orderedmultidict # python-dateutil -sqlparse==0.5.1 +sqlparse==0.5.3 # via django tensorboardx==2.6.2.2 # via datumaro @@ -336,6 +334,7 @@ typing-extensions==4.12.2 # azure-core # datumaro # limits + # nibabel # pottery tzdata==2024.2 # via pandas @@ -348,13 +347,13 @@ urllib3==1.26.20 # botocore # clickhouse-connect # requests -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated xmlsec==1.3.14 # via # -r cvat/requirements/base.in # python3-saml -zipp==3.20.2 +zipp==3.21.0 # via importlib-metadata zstandard==0.23.0 # via clickhouse-connect diff --git 
a/cvat/requirements/development.in b/cvat/requirements/development.in index ad5a5b6557ec..9c5e0662b52d 100644 --- a/cvat/requirements/development.in +++ b/cvat/requirements/development.in @@ -1,10 +1,6 @@ -r base.in -black>=24.1 django-extensions==3.0.8 django-silk==5.* -pylint-django==2.5.3 -pylint-plugin-utils==0.7 -pylint==2.14.5 rope==0.17.0 snakeviz==2.1.0 diff --git a/cvat/requirements/development.txt b/cvat/requirements/development.txt index 3a76b3048511..cc730b7916eb 100644 --- a/cvat/requirements/development.txt +++ b/cvat/requirements/development.txt @@ -1,4 +1,4 @@ -# SHA1:b71f4fe955f645187b7ccdf82b05f6a8d61eb3ab +# SHA1:cd8d0825dc4cfe37b22a489422105acba5483fe4 # # This file is autogenerated by pip-compile-multi # To update, run: @@ -6,61 +6,21 @@ # pip-compile-multi # -r base.txt -astroid==2.11.7 - # via pylint autopep8==2.3.1 # via django-silk -black==24.8.0 - # via -r cvat/requirements/development.in -dill==0.3.9 - # via pylint django-extensions==3.0.8 # via -r cvat/requirements/development.in -django-silk==5.2.0 +django-silk==5.3.2 # via -r cvat/requirements/development.in gprof2dot==2024.6.6 # via django-silk -isort==5.13.2 - # via pylint -lazy-object-proxy==1.10.0 - # via astroid -mccabe==0.7.0 - # via pylint -mypy-extensions==1.0.0 - # via black -pathspec==0.12.1 - # via black -platformdirs==4.3.6 - # via - # black - # pylint pycodestyle==2.12.1 # via autopep8 -pylint==2.14.5 - # via - # -r cvat/requirements/development.in - # pylint-django - # pylint-plugin-utils -pylint-django==2.5.3 - # via -r cvat/requirements/development.in -pylint-plugin-utils==0.7 - # via - # -r cvat/requirements/development.in - # pylint-django rope==0.17.0 # via -r cvat/requirements/development.in snakeviz==2.1.0 # via -r cvat/requirements/development.in -tomli==2.0.2 - # via - # autopep8 - # black - # pylint -tomlkit==0.13.2 - # via pylint -tornado==6.4.1 +tomli==2.2.1 + # via autopep8 +tornado==6.4.2 # via snakeviz - -# The following packages are considered to be unsafe in a requirements file: -setuptools==75.1.0 - # via astroid diff --git a/cvat/requirements/production.txt b/cvat/requirements/production.txt index 9c845a83424b..155d626a6984 100644 --- a/cvat/requirements/production.txt +++ b/cvat/requirements/production.txt @@ -6,7 +6,7 @@ # pip-compile-multi # -r base.txt -anyio==4.6.0 +anyio==4.7.0 # via watchfiles coverage==7.2.3 # via -r cvat/requirements/production.in @@ -14,7 +14,7 @@ exceptiongroup==1.2.2 # via anyio h11==0.14.0 # via uvicorn -httptools==0.6.1 +httptools==0.6.4 # via uvicorn python-dotenv==1.0.1 # via uvicorn @@ -22,9 +22,9 @@ sniffio==1.3.1 # via anyio uvicorn[standard]==0.22.0 # via -r cvat/requirements/production.in -uvloop==0.20.0 +uvloop==0.21.0 # via uvicorn -watchfiles==0.24.0 +watchfiles==1.0.3 # via uvicorn -websockets==13.1 +websockets==14.1 # via uvicorn diff --git a/cvat/requirements/testing.txt b/cvat/requirements/testing.txt index 90c8a13254c0..86ab66664526 100644 --- a/cvat/requirements/testing.txt +++ b/cvat/requirements/testing.txt @@ -14,5 +14,3 @@ lupa==1.14.1 # via fakeredis sortedcontainers==2.4.0 # via fakeredis - -# The following packages are considered to be unsafe in a requirements file: diff --git a/cvat/rq_patching.py b/cvat/rq_patching.py index cd8c1ac74225..a12bcaaaedd3 100644 --- a/cvat/rq_patching.py +++ b/cvat/rq_patching.py @@ -32,18 +32,25 @@ def custom_started_job_registry_cleanup(self, timestamp: Optional[float] = None) job_ids = self.get_expired_job_ids(score) if job_ids: - failed_job_registry = 
rq.registry.FailedJobRegistry(self.name, self.connection, serializer=self.serializer) + failed_job_registry = rq.registry.FailedJobRegistry( + self.name, self.connection, serializer=self.serializer + ) queue = self.get_queue() with self.connection.pipeline() as pipeline: for job_id in job_ids: try: - job = self.job_class.fetch(job_id, connection=self.connection, serializer=self.serializer) + job = self.job_class.fetch( + job_id, connection=self.connection, serializer=self.serializer + ) except NoSuchJobError: continue job.execute_failure_callback( - self.death_penalty_class, AbandonedJobError, AbandonedJobError(), traceback.extract_stack() + self.death_penalty_class, + AbandonedJobError, + AbandonedJobError(), + traceback.extract_stack(), ) retry = job.retries_left and job.retries_left > 0 @@ -54,8 +61,8 @@ def custom_started_job_registry_cleanup(self, timestamp: Optional[float] = None) else: exc_string = f"due to {AbandonedJobError.__name__}" rq.registry.logger.warning( - f'{self.__class__.__name__} cleanup: Moving job to {rq.registry.FailedJobRegistry.__name__} ' - f'({exc_string})' + f"{self.__class__.__name__} cleanup: Moving job to {rq.registry.FailedJobRegistry.__name__} " + f"({exc_string})" ) job.set_status(JobStatus.FAILED) job._exc_info = f"Moved to {rq.registry.FailedJobRegistry.__name__}, {exc_string}, at {datetime.now()}" @@ -69,7 +76,8 @@ def custom_started_job_registry_cleanup(self, timestamp: Optional[float] = None) return job_ids + def update_started_job_registry_cleanup() -> None: # don't forget to check if the issue https://github.com/rq/rq/issues/2006 has been resolved in upstream - assert VERSION == '1.16.0' + assert VERSION == "1.16.0" rq.registry.StartedJobRegistry.cleanup = custom_started_job_registry_cleanup diff --git a/cvat/rqworker.py b/cvat/rqworker.py index d368a1ef2629..309c4d5ce714 100644 --- a/cvat/rqworker.py +++ b/cvat/rqworker.py @@ -9,7 +9,6 @@ import cvat.utils.remote_debugger as debug - DefaultWorker = Worker @@ -42,12 +41,14 @@ def execute_job(self, *args, **kwargs): # errors during debugging # https://stackoverflow.com/questions/8242837/django-multiprocessing-and-database-connections/10684672#10684672 from django import db + db.connections.close_all() return self.perform_job(*args, **kwargs) if debug.is_debugging_enabled(): + class RemoteDebugWorker(SimpleWorker): """ Support for VS code debugger @@ -68,6 +69,7 @@ def execute_job(self, *args, **kwargs): if os.environ.get("COVERAGE_PROCESS_START"): import coverage + default_exit = os._exit def coverage_exit(*args, **kwargs): diff --git a/cvat/schema.yml b/cvat/schema.yml index 4a33b80c24ae..8af068ecc8b2 100644 --- a/cvat/schema.yml +++ b/cvat/schema.yml @@ -1,7 +1,7 @@ openapi: 3.0.3 info: title: CVAT REST API - version: 2.22.0 + version: 2.24.1 description: REST API for Computer Vision Annotation Tool (CVAT) termsOfService: https://www.google.com/policies/terms/ contact: @@ -8049,15 +8049,6 @@ components: job: type: integer description: The id of the job to be annotated - quality: - allOf: - - $ref: '#/components/schemas/QualityEnum' - default: original - description: |- - The quality of the images to use in the model run - - * `compressed` - compressed - * `original` - original max_distance: type: integer threshold: @@ -8069,7 +8060,11 @@ components: description: Whether existing annotations should be removed convMaskToPoly: type: boolean - default: false + writeOnly: true + description: Deprecated; use conv_mask_to_poly instead + conv_mask_to_poly: + type: boolean + description: Convert mask 
shapes to polygons mapping: type: object additionalProperties: @@ -8995,6 +8990,9 @@ components: - slug PaginatedAnnotationConflictList: type: object + required: + - count + - results properties: count: type: integer @@ -9015,6 +9013,9 @@ components: $ref: '#/components/schemas/AnnotationConflict' PaginatedCloudStorageReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9035,6 +9036,9 @@ components: $ref: '#/components/schemas/CloudStorageRead' PaginatedCommentReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9055,6 +9059,9 @@ components: $ref: '#/components/schemas/CommentRead' PaginatedInvitationReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9075,6 +9082,9 @@ components: $ref: '#/components/schemas/InvitationRead' PaginatedIssueReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9095,6 +9105,9 @@ components: $ref: '#/components/schemas/IssueRead' PaginatedJobReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9115,6 +9128,9 @@ components: $ref: '#/components/schemas/JobRead' PaginatedLabelList: type: object + required: + - count + - results properties: count: type: integer @@ -9135,6 +9151,9 @@ components: $ref: '#/components/schemas/Label' PaginatedMembershipReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9155,6 +9174,9 @@ components: $ref: '#/components/schemas/MembershipRead' PaginatedMetaUserList: type: object + required: + - count + - results properties: count: type: integer @@ -9175,6 +9197,9 @@ components: $ref: '#/components/schemas/MetaUser' PaginatedOrganizationReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9195,6 +9220,9 @@ components: $ref: '#/components/schemas/OrganizationRead' PaginatedProjectReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9215,6 +9243,9 @@ components: $ref: '#/components/schemas/ProjectRead' PaginatedQualityReportList: type: object + required: + - count + - results properties: count: type: integer @@ -9235,6 +9266,9 @@ components: $ref: '#/components/schemas/QualityReport' PaginatedQualitySettingsList: type: object + required: + - count + - results properties: count: type: integer @@ -9255,6 +9289,9 @@ components: $ref: '#/components/schemas/QualitySettings' PaginatedRequestList: type: object + required: + - count + - results properties: count: type: integer @@ -9275,6 +9312,9 @@ components: $ref: '#/components/schemas/Request' PaginatedTaskReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9295,6 +9335,9 @@ components: $ref: '#/components/schemas/TaskRead' PaginatedWebhookDeliveryReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9315,6 +9358,9 @@ components: $ref: '#/components/schemas/WebhookDeliveryRead' PaginatedWebhookReadList: type: object + required: + - count + - results properties: count: type: integer @@ -9658,9 +9704,28 @@ components: format: double description: | Like IoU threshold, but for points. - The percent of the bbox area, used as the radius of the circle around the GT point, - where the checked point is expected to be. + The percent of the bbox side, used as the radius of the circle around the GT point, + where the checked point is expected to be. 
For boxes with different width and + height, the "side" is computed as a geometric mean of the width and height. Read more: https://cocodataset.org/#keypoints-eval + point_size_base: + allOf: + - $ref: '#/components/schemas/PointSizeBaseEnum' + description: |- + When comparing point annotations (including both separate points and point groups), + the OKS sigma parameter defines the matching area for each GT point based on the + object size. The point size base parameter allows configuring how the object size + is determined. + If image_size, the image size is used. Useful if each point + annotation represents a separate object, or if boxes grouped with points do not + represent object boundaries. + If group_bbox_size, the object size is based on + the point group bbox size. Useful if each point group represents an object, + or if there is a bbox grouped with points, representing the object size. + + + * `image_size` - IMAGE_SIZE + * `group_bbox_size` - GROUP_BBOX_SIZE line_thickness: type: number format: double @@ -9710,6 +9775,14 @@ components: compare_attributes: type: boolean description: Enables or disables annotation attribute comparison + match_empty_frames: + type: boolean + default: false + description: | + Count empty frames as matching. This affects target metrics like accuracy in cases + where there are no annotations. If disabled, frames without annotations + are counted as not matching (accuracy is 0). If enabled, accuracy will be 1 instead. + This will also add virtual annotations to empty frames in the comparison results. PatchedTaskValidationLayoutWriteRequest: type: object properties: @@ -9854,6 +9927,14 @@ components: - GIT_INTEGRATION - MODELS - PREDICT + PointSizeBaseEnum: + enum: + - image_size + - group_bbox_size + type: string + description: |- + * `image_size` - IMAGE_SIZE + * `group_bbox_size` - GROUP_BBOX_SIZE ProjectFileRequest: type: object properties: @@ -9985,14 +10066,6 @@ components: * `AZURE_CONTAINER` - AZURE_CONTAINER * `GOOGLE_DRIVE` - GOOGLE_DRIVE * `GOOGLE_CLOUD_STORAGE` - GOOGLE_CLOUD_STORAGE - QualityEnum: - enum: - - compressed - - original - type: string - description: |- - * `compressed` - compressed - * `original` - original QualityReport: type: object properties: @@ -10138,9 +10211,28 @@ components: format: double description: | Like IoU threshold, but for points. - The percent of the bbox area, used as the radius of the circle around the GT point, - where the checked point is expected to be. + The percent of the bbox side, used as the radius of the circle around the GT point, + where the checked point is expected to be. For boxes with different width and + height, the "side" is computed as a geometric mean of the width and height. Read more: https://cocodataset.org/#keypoints-eval + point_size_base: + allOf: + - $ref: '#/components/schemas/PointSizeBaseEnum' + description: |- + When comparing point annotations (including both separate points and point groups), + the OKS sigma parameter defines the matching area for each GT point based on the + object size. The point size base parameter allows configuring how the object size + is determined. + If image_size, the image size is used. Useful if each point + annotation represents a separate object, or if boxes grouped with points do not + represent object boundaries. + If group_bbox_size, the object size is based on + the point group bbox size. Useful if each point group represents an object, + or if there is a bbox grouped with points, representing the object size.
+ + + * `image_size` - IMAGE_SIZE + * `group_bbox_size` - GROUP_BBOX_SIZE line_thickness: type: number format: double @@ -10190,6 +10282,14 @@ components: compare_attributes: type: boolean description: Enables or disables annotation attribute comparison + match_empty_frames: + type: boolean + default: false + description: | + Count empty frames as matching. This affects target metrics like accuracy in cases + where there are no annotations. If disabled, frames without annotations + are counted as not matching (accuracy is 0). If enabled, accuracy will be 1 instead. + This will also add virtual annotations to empty frames in the comparison results. RegisterSerializerEx: type: object properties: @@ -11103,6 +11203,9 @@ components: type: string format: date-time readOnly: true + has_analytics_access: + type: boolean + readOnly: true required: - groups - username diff --git a/cvat/settings/base.py index d698fe563531..0f6147dc4bf0 100644 --- a/cvat/settings/base.py +++ b/cvat/settings/base.py @@ -177,7 +177,9 @@ def generate_secret_key(): 'OLD_PASSWORD_FIELD_ENABLED': True, } -if to_bool(os.getenv('CVAT_ANALYTICS', False)): +ANALYTICS_ENABLED = to_bool(os.getenv('CVAT_ANALYTICS', False)) + +if ANALYTICS_ENABLED: INSTALLED_APPS += ['cvat.apps.log_viewer'] MIDDLEWARE = [ @@ -234,7 +236,7 @@ def generate_secret_key(): IAM_ADMIN_ROLE = 'admin' # Index in the list below corresponds to the priority (0 has highest priority) -IAM_ROLES = [IAM_ADMIN_ROLE, 'business', 'user', 'worker'] +IAM_ROLES = [IAM_ADMIN_ROLE, 'user', 'worker'] IAM_OPA_HOST = 'http://opa:8181' IAM_OPA_DATA_URL = f'{IAM_OPA_HOST}/v1/data' LOGIN_URL = 'rest_login' @@ -274,6 +276,7 @@ class CVAT_QUEUES(Enum): QUALITY_REPORTS = 'quality_reports' ANALYTICS_REPORTS = 'analytics_reports' CLEANING = 'cleaning' + CHUNKS = 'chunks' redis_inmem_host = os.getenv('CVAT_REDIS_INMEM_HOST', 'localhost') redis_inmem_port = os.getenv('CVAT_REDIS_INMEM_PORT', 6379) @@ -319,6 +322,10 @@ class CVAT_QUEUES(Enum): **shared_queue_settings, 'DEFAULT_TIMEOUT': '1h', }, + CVAT_QUEUES.CHUNKS.value: { + **shared_queue_settings, + 'DEFAULT_TIMEOUT': '5m', + }, } NUCLIO = { @@ -339,6 +346,15 @@ class CVAT_QUEUES(Enum): 'cvat.apps.events.handlers.handle_rq_exception', ] +PERIODIC_RQ_JOBS = [ + { + 'queue': CVAT_QUEUES.CLEANING.value, + 'id': 'clean_up_sessions', + 'func': 'cvat.apps.iam.utils.clean_up_sessions', + 'cron_string': '0 0 * * *', + }, +] + # JavaScript and CSS compression # https://django-compressor.readthedocs.io @@ -521,12 +537,6 @@ class CVAT_QUEUES(Enum): DATA_UPLOAD_MAX_NUMBER_FIELDS = None # this django check disabled DATA_UPLOAD_MAX_NUMBER_FILES = None -RESTRICTIONS = { - # allow access to analytics component to users with business role - # otherwise, only the administrator has access - 'analytics_visibility': True, -} - redis_ondisk_host = os.getenv('CVAT_REDIS_ONDISK_HOST', 'localhost') # The default port is not Redis's default port (6379).
# This is so that a developer can run both in-mem Redis and on-disk Kvrocks on their machine @@ -534,14 +544,20 @@ class CVAT_QUEUES(Enum): redis_ondisk_port = os.getenv('CVAT_REDIS_ONDISK_PORT', 6666) redis_ondisk_password = os.getenv('CVAT_REDIS_ONDISK_PASSWORD', '') +# Sets the timeout for the expiration of a data chunk in redis_ondisk +CVAT_CHUNK_CACHE_TTL = 3600 * 24 # 1 day + +# Sets the timeout for the expiration of a preview image in redis_ondisk +CVAT_PREVIEW_CACHE_TTL = 3600 * 24 * 7 # 7 days + CACHES = { - 'default': { + 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, 'media': { - 'BACKEND' : 'django.core.cache.backends.redis.RedisCache', - "LOCATION": f"redis://:{urllib.parse.quote(redis_ondisk_password)}@{redis_ondisk_host}:{redis_ondisk_port}", - 'TIMEOUT' : 3600 * 24, # 1 day + 'BACKEND' : 'django.core.cache.backends.redis.RedisCache', + "LOCATION": f'redis://:{urllib.parse.quote(redis_ondisk_password)}@{redis_ondisk_host}:{redis_ondisk_port}', + 'TIMEOUT' : CVAT_CHUNK_CACHE_TTL, } } @@ -569,6 +585,8 @@ class CVAT_QUEUES(Enum): # How django uses X-Forwarded-Proto - https://docs.djangoproject.com/en/2.2/ref/settings/#secure-proxy-ssl-header SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') +SECURE_REFERRER_POLICY = 'strict-origin-when-cross-origin' + # Forwarded host - https://docs.djangoproject.com/en/4.0/ref/settings/#std:setting-USE_X_FORWARDED_HOST # Is used in TUS uploads to provide correct upload endpoint USE_X_FORWARDED_HOST = True diff --git a/cvat/settings/development.py index 2898fc88628f..ece4e544d2e6 100644 --- a/cvat/settings/development.py +++ b/cvat/settings/development.py @@ -64,6 +64,4 @@ # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES['default']['HOST'] = os.getenv('CVAT_POSTGRES_HOST', 'localhost') -QUALITY_CHECK_JOB_DELAY = 5 - SMOKESCREEN_ENABLED = False diff --git a/cvat/settings/testing_rest.py index addda985492a..6203421bb7a0 100644 --- a/cvat/settings/testing_rest.py +++ b/cvat/settings/testing_rest.py @@ -11,11 +11,6 @@ "django.contrib.auth.hashers.MD5PasswordHasher", ] -# Avoid quality updates during test runs. -# Note that DB initialization triggers server signals, -# so quality report updates are scheduled for applicable jobs.
-QUALITY_CHECK_JOB_DELAY = 10000 - IMPORT_CACHE_CLEAN_DELAY = timedelta(seconds=30) # The tests should not fail due to high disk utilization of CI infrastructure that we have no control over diff --git a/cvat/urls.py b/cvat/urls.py index 144ed619f766..08257a14b811 100644 --- a/cvat/urls.py +++ b/cvat/urls.py @@ -23,31 +23,31 @@ from django.urls import path, include urlpatterns = [ - path('admin/', admin.site.urls), - path('', include('cvat.apps.engine.urls')), - path('django-rq/', include('django_rq.urls')), + path("admin/", admin.site.urls), + path("", include("cvat.apps.engine.urls")), + path("django-rq/", include("django_rq.urls")), ] -if apps.is_installed('cvat.apps.log_viewer'): - urlpatterns.append(path('', include('cvat.apps.log_viewer.urls'))) +if apps.is_installed("cvat.apps.log_viewer"): + urlpatterns.append(path("", include("cvat.apps.log_viewer.urls"))) -if apps.is_installed('cvat.apps.events'): - urlpatterns.append(path('api/', include('cvat.apps.events.urls'))) +if apps.is_installed("cvat.apps.events"): + urlpatterns.append(path("api/", include("cvat.apps.events.urls"))) -if apps.is_installed('cvat.apps.lambda_manager'): - urlpatterns.append(path('', include('cvat.apps.lambda_manager.urls'))) +if apps.is_installed("cvat.apps.lambda_manager"): + urlpatterns.append(path("", include("cvat.apps.lambda_manager.urls"))) -if apps.is_installed('cvat.apps.webhooks'): - urlpatterns.append(path('api/', include('cvat.apps.webhooks.urls'))) +if apps.is_installed("cvat.apps.webhooks"): + urlpatterns.append(path("api/", include("cvat.apps.webhooks.urls"))) -if apps.is_installed('cvat.apps.quality_control'): - urlpatterns.append(path('api/', include('cvat.apps.quality_control.urls'))) +if apps.is_installed("cvat.apps.quality_control"): + urlpatterns.append(path("api/", include("cvat.apps.quality_control.urls"))) -if apps.is_installed('silk'): - urlpatterns.append(path('profiler/', include('silk.urls'))) +if apps.is_installed("silk"): + urlpatterns.append(path("profiler/", include("silk.urls"))) -if apps.is_installed('health_check'): - urlpatterns.append(path('api/server/health/', include('health_check.urls'))) +if apps.is_installed("health_check"): + urlpatterns.append(path("api/server/health/", include("health_check.urls"))) -if apps.is_installed('cvat.apps.analytics_report'): - urlpatterns.append(path('api/', include('cvat.apps.analytics_report.urls'))) +if apps.is_installed("cvat.apps.analytics_report"): + urlpatterns.append(path("api/", include("cvat.apps.analytics_report.urls"))) diff --git a/cvat/utils/background_jobs.py b/cvat/utils/background_jobs.py deleted file mode 100644 index caf2e859a530..000000000000 --- a/cvat/utils/background_jobs.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2024 CVAT.ai Corporation -# -# SPDX-License-Identifier: MIT - -from collections.abc import Callable -from datetime import datetime - -import django_rq - -def schedule_job_with_throttling( - queue_name: str, - job_id_base: str, - scheduled_time: datetime, - func: Callable, - **func_kwargs -) -> None: - """ - This function schedules an RQ job to run at `scheduled_time`, - unless it had already been used to schedule a job to run at some future time - with the same values of `queue_name` and `job_id_base`, - in which case it does nothing. - - The scheduled job will have an ID beginning with `job_id_base`, - and will execute `func(**func_kwargs)`. - """ - with django_rq.get_connection(queue_name) as connection: - # The blocker key is used to implement the throttling. 
- # The first time this function is called for a given tuple of - # (queue_name, job_id_base), we schedule the job and create a blocker - # that expires at the same time as the job is supposed to start. - # Until the blocker expires, we don't schedule any more jobs - # with the same tuple. - blocker_key = f"cvat:utils:scheduling-blocker:{queue_name}:{job_id_base}" - if connection.exists(blocker_key): - return - - queue_job_id = f"{job_id_base}-{scheduled_time.timestamp()}" - - # TODO: reuse the Redis connection if Django-RQ allows it. - # See . - django_rq.get_scheduler(queue_name).enqueue_at( - scheduled_time, - func, - **func_kwargs, - job_id=queue_job_id, - ) - - connection.set(blocker_key, queue_job_id, exat=scheduled_time) diff --git a/cvat/utils/http.py b/cvat/utils/http.py index b2ed89a5d555..2cb1b7498b32 100644 --- a/cvat/utils/http.py +++ b/cvat/utils/http.py @@ -19,11 +19,12 @@ if settings.SMOKESCREEN_ENABLED: PROXIES_FOR_UNTRUSTED_URLS = { - 'http': 'http://localhost:4750', - 'https': 'http://localhost:4750', + "http": "http://localhost:4750", + "https": "http://localhost:4750", } + def make_requests_session() -> requests.Session: session = requests.Session() - session.headers['User-Agent'] = _CVAT_USER_AGENT + session.headers["User-Agent"] = _CVAT_USER_AGENT return session diff --git a/cvat/utils/remote_debugger.py b/cvat/utils/remote_debugger.py index b4d01baf3c31..bc6ef40ae0e2 100644 --- a/cvat/utils/remote_debugger.py +++ b/cvat/utils/remote_debugger.py @@ -6,7 +6,8 @@ def is_debugging_enabled() -> bool: - return os.environ.get('CVAT_DEBUG_ENABLED') == 'yes' + return os.environ.get("CVAT_DEBUG_ENABLED") == "yes" + if is_debugging_enabled(): import debugpy @@ -21,8 +22,8 @@ class RemoteDebugger: Read more: https://modwsgi.readthedocs.io/en/develop/user-guides/debugging-techniques.html """ - ENV_VAR_PORT = 'CVAT_DEBUG_PORT' - ENV_VAR_WAIT = 'CVAT_DEBUG_WAIT' + ENV_VAR_PORT = "CVAT_DEBUG_PORT" + ENV_VAR_WAIT = "CVAT_DEBUG_WAIT" __debugger_initialized = False @classmethod @@ -35,7 +36,7 @@ def _singleton_init(cls): # The only intended use is in Docker. # Using 127.0.0.1 will not allow host connections - addr = ('0.0.0.0', port) # nosec - B104:hardcoded_bind_all_interfaces + addr = ("0.0.0.0", port) # nosec - B104:hardcoded_bind_all_interfaces # Debugpy is a singleton # We put it in the main thread of the process and then report new threads @@ -45,7 +46,7 @@ def _singleton_init(cls): # Feel free to enable if needed. 
debugpy.configure({"subProcess": False}) - if os.environ.get(cls.ENV_VAR_WAIT) == 'yes': + if os.environ.get(cls.ENV_VAR_WAIT) == "yes": debugpy.wait_for_client() except Exception as ex: raise Exception("failed to set debugger") from ex diff --git a/cvat/utils/version.py b/cvat/utils/version.py index ecc79eea7051..8b1b53a10384 100644 --- a/cvat/utils/version.py +++ b/cvat/utils/version.py @@ -11,6 +11,7 @@ import os import subprocess + def get_version(version): """Return a PEP 440-compliant version number from VERSION.""" # Now build the two parts of the version number: @@ -20,21 +21,23 @@ def get_version(version): main = get_main_version(version) - sub = '' - if version[3] == 'alpha' and version[4] == 0: + sub = "" + if version[3] == "alpha" and version[4] == 0: git_changeset = get_git_changeset() if git_changeset: - sub = '.dev%s' % git_changeset + sub = ".dev%s" % git_changeset - elif version[3] != 'final': - mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'} + elif version[3] != "final": + mapping = {"alpha": "a", "beta": "b", "rc": "rc"} sub = mapping[version[3]] + str(version[4]) return main + sub + def get_main_version(version): """Return main version (X.Y.Z) from VERSION.""" - return '.'.join(str(x) for x in version[:3]) + return ".".join(str(x) for x in version[:3]) + def get_git_changeset(): """Return a numeric identifier of the latest git changeset. @@ -44,14 +47,16 @@ def get_git_changeset(): so it's sufficient for generating the development version numbers. """ repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - git_log = subprocess.Popen( # nosec: B603, B607 - ['git', 'log', '--pretty=format:%ct', '--quiet', '-1', 'HEAD'], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - cwd=repo_dir, universal_newlines=True, + git_log = subprocess.Popen( # nosec: B603, B607 + ["git", "log", "--pretty=format:%ct", "--quiet", "-1", "HEAD"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=repo_dir, + universal_newlines=True, ) timestamp = git_log.communicate()[0] try: timestamp = datetime.datetime.fromtimestamp(int(timestamp), tz=datetime.timezone.utc) except ValueError: return None - return timestamp.strftime('%Y%m%d%H%M%S') + return timestamp.strftime("%Y%m%d%H%M%S") diff --git a/dev/check_changelog_fragments.py b/dev/check_changelog_fragments.py index d417bcd669f5..437e3fb02cdb 100755 --- a/dev/check_changelog_fragments.py +++ b/dev/check_changelog_fragments.py @@ -6,17 +6,18 @@ REPO_ROOT = Path(__file__).resolve().parents[1] + def main(): scriv_config = configparser.ConfigParser() - scriv_config.read(REPO_ROOT / 'changelog.d/scriv.ini') + scriv_config.read(REPO_ROOT / "changelog.d/scriv.ini") - scriv_section = scriv_config['scriv'] - assert scriv_section['format'] == 'md' + scriv_section = scriv_config["scriv"] + assert scriv_section["format"] == "md" - md_header_level = int(scriv_section['md_header_level']) - md_header_prefix = '#' * md_header_level + '# ' + md_header_level = int(scriv_section["md_header_level"]) + md_header_prefix = "#" * md_header_level + "# " - categories = {s.strip() for s in scriv_section['categories'].split(',')} + categories = {s.strip() for s in scriv_section["categories"].split(",")} success = True @@ -25,19 +26,33 @@ def complain(message): success = False print(f"{fragment_path.relative_to(REPO_ROOT)}:{line_index+1}: {message}", file=sys.stderr) - for fragment_path in REPO_ROOT.glob('changelog.d/*.md'): + for fragment_path in REPO_ROOT.glob("changelog.d/*.md"): with open(fragment_path) as fragment_file: for line_index, line in 
enumerate(fragment_file): - if not line.startswith(md_header_prefix): - # The first line should be a header, and all headers should be of appropriate level. - if line_index == 0 or line.startswith('#'): - complain(f"line should start with {md_header_prefix!r}") - continue - - category = line.removeprefix(md_header_prefix).strip() - if category not in categories: - complain(f"unknown category: {category}") + line = line.rstrip("\n") + + if line_index == 0: + # The first line should always be a header. + if not line.startswith("#"): + complain("line should be a header") + elif ( + line + and not line.startswith("#") + and not line.startswith("-") + and not line.startswith(" ") + ): + complain("line should be a header, a list item, or indented") + + if line.startswith("#"): + if line.startswith(md_header_prefix): + category = line.removeprefix(md_header_prefix).strip() + if category not in categories: + complain(f"unknown category: {category}") + else: + # All headers should be of the same level. + complain(f"header should start with {md_header_prefix!r}") sys.exit(0 if success else 1) + main() diff --git a/dev/format_python_code.sh b/dev/format_python_code.sh index 257915acfdc2..db18ce328dc4 100755 --- a/dev/format_python_code.sh +++ b/dev/format_python_code.sh @@ -30,6 +30,10 @@ for paths in \ "cvat/apps/engine/default_settings.py" \ "cvat/apps/engine/field_validation.py" \ "cvat/apps/engine/model_utils.py" \ + "cvat/apps/engine/task_validation.py" \ + "cvat/apps/dataset_manager/tests/test_annotation.py" \ + "cvat/apps/dataset_manager/tests/utils.py" \ + "cvat/apps/events/signals.py" \ ; do ${BLACK} -- ${paths} ${ISORT} -- ${paths} diff --git a/dev/requirements.txt b/dev/requirements.txt new file mode 100644 index 000000000000..4603689ae469 --- /dev/null +++ b/dev/requirements.txt @@ -0,0 +1,5 @@ +black==24.* +isort==5.* +pylint-django==2.5.3 +pylint-plugin-utils==0.7 +pylint==2.14.5 diff --git a/dev/update_version.py b/dev/update_version.py index 6cdaf313f968..bc175aa16dd0 100755 --- a/dev/update_version.py +++ b/dev/update_version.py @@ -6,43 +6,47 @@ import sys from dataclasses import dataclass from pathlib import Path -from typing import Callable, Match, Pattern +from re import Match, Pattern +from typing import Callable -SUCCESS_CHAR = '\u2714' -FAIL_CHAR = '\u2716' +SUCCESS_CHAR = "\u2714" +FAIL_CHAR = "\u2716" -CVAT_VERSION_PATTERN = re.compile(r'VERSION\s*=\s*\((\d+),\s*(\d*),\s*(\d+),\s*[\',\"](\w+)[\',\"],\s*(\d+)\)') +CVAT_VERSION_PATTERN = re.compile( + r"VERSION\s*=\s*\((\d+),\s*(\d*),\s*(\d+),\s*[\',\"](\w+)[\',\"],\s*(\d+)\)" +) REPO_ROOT_DIR = Path(__file__).resolve().parents[1] -CVAT_INIT_PY_REL_PATH = 'cvat/__init__.py' +CVAT_INIT_PY_REL_PATH = "cvat/__init__.py" CVAT_INIT_PY_PATH = REPO_ROOT_DIR / CVAT_INIT_PY_REL_PATH + @dataclass() class Version: major: int = 0 minor: int = 0 patch: int = 0 - prerelease: str = '' + prerelease: str = "" prerelease_number: int = 0 def __str__(self) -> str: - return f'{self.major}.{self.minor}.{self.patch}-{self.prerelease}.{self.prerelease_number}' + return f"{self.major}.{self.minor}.{self.patch}-{self.prerelease}.{self.prerelease_number}" def cvat_repr(self): - return f"({self.major}, {self.minor}, {self.patch}, '{self.prerelease}', {self.prerelease_number})" + return f'({self.major}, {self.minor}, {self.patch}, "{self.prerelease}", {self.prerelease_number})' def compose_repr(self): - if self.prerelease != 'final': - return 'dev' - return f'v{self.major}.{self.minor}.{self.patch}' + if self.prerelease != "final": + return "dev" + return 
f"v{self.major}.{self.minor}.{self.patch}" def increment_prerelease_number(self) -> None: self.prerelease_number += 1 def increment_prerelease(self) -> None: - flow = ('alpha', 'beta', 'rc', 'final') + flow = ("alpha", "beta", "rc", "final") idx = flow.index(self.prerelease) if idx == len(flow) - 1: raise ValueError(f"Cannot increment current '{self.prerelease}' prerelease version") @@ -51,9 +55,9 @@ def increment_prerelease(self) -> None: self._set_default_prerelease_number() def set_prerelease(self, value: str) -> None: - values = ('alpha', 'beta', 'rc', 'final') + values = ("alpha", "beta", "rc", "final") if value not in values: - raise ValueError(f'{value} is a wrong, must be one of {values}') + raise ValueError(f"{value} is a wrong, must be one of {values}") self.prerelease = value self._set_default_prerelease_number() @@ -71,15 +75,15 @@ def increment_major(self) -> None: self._set_default_minor() def set(self, v: str) -> None: - self.major, self.minor, self.patch = map(int, v.split('.')) - self.prerelease = 'final' + self.major, self.minor, self.patch = map(int, v.split(".")) + self.prerelease = "final" self.prerelease_number = 0 def _set_default_prerelease_number(self) -> None: self.prerelease_number = 0 def _set_default_prerelease(self) -> None: - self.prerelease = 'alpha' + self.prerelease = "alpha" self._set_default_prerelease_number() def _set_default_patch(self) -> None: @@ -90,6 +94,7 @@ def _set_default_minor(self) -> None: self.minor = 0 self._set_default_patch() + @dataclass(frozen=True) class ReplacementRule: rel_path: str @@ -101,89 +106,113 @@ def apply(self, new_version: Version, *, verify_only: bool) -> bool: text = path.read_text() new_text, num_replacements = self.pattern.subn( - functools.partial(self.replacement, new_version), text) + functools.partial(self.replacement, new_version), text + ) if not num_replacements: - print(f'{FAIL_CHAR} {self.rel_path}: failed to match version pattern.') + print(f"{FAIL_CHAR} {self.rel_path}: failed to match version pattern.") return False if text == new_text: if verify_only: - print(f'{SUCCESS_CHAR} {self.rel_path}: verified.') + print(f"{SUCCESS_CHAR} {self.rel_path}: verified.") else: - print(f'{SUCCESS_CHAR} {self.rel_path}: no need to update.') + print(f"{SUCCESS_CHAR} {self.rel_path}: no need to update.") else: if verify_only: - print(f'{FAIL_CHAR} {self.rel_path}: verification failed.') + print(f"{FAIL_CHAR} {self.rel_path}: verification failed.") return False else: path.write_text(new_text) - print(f'{SUCCESS_CHAR} {self.rel_path}: updated.') + print(f"{SUCCESS_CHAR} {self.rel_path}: updated.") return True -REPLACEMENT_RULES = [ - ReplacementRule(CVAT_INIT_PY_REL_PATH, CVAT_VERSION_PATTERN, - lambda v, m: f'VERSION = {v.cvat_repr()}'), - - ReplacementRule('docker-compose.yml', - re.compile(r'(\$\{CVAT_VERSION:-)([\w.]+)(\})'), - lambda v, m: m[1] + v.compose_repr() + m[3]), - - ReplacementRule('helm-chart/values.yaml', - re.compile(r'(^ image: cvat/(?:ui|server)\n tag: )([\w.]+)', re.M), - lambda v, m: m[1] + v.compose_repr()), - ReplacementRule('cvat-sdk/gen/generate.sh', +REPLACEMENT_RULES = [ + ReplacementRule( + CVAT_INIT_PY_REL_PATH, CVAT_VERSION_PATTERN, lambda v, m: f"VERSION = {v.cvat_repr()}" + ), + ReplacementRule( + "docker-compose.yml", + re.compile(r"(\$\{CVAT_VERSION:-)([\w.]+)(\})"), + lambda v, m: m[1] + v.compose_repr() + m[3], + ), + ReplacementRule( + "helm-chart/values.yaml", + re.compile(r"(^ image: cvat/(?:ui|server)\n tag: )([\w.]+)", re.M), + lambda v, m: m[1] + v.compose_repr(), + ), + 
ReplacementRule( + "cvat-sdk/gen/generate.sh", re.compile(r'^VERSION="[\d.]+"$', re.M), - lambda v, m: f'VERSION="{v.major}.{v.minor}.{v.patch}"'), - - ReplacementRule('cvat/schema.yml', + lambda v, m: f'VERSION="{v.major}.{v.minor}.{v.patch}"', + ), + ReplacementRule( + "cvat/schema.yml", re.compile(r"^ version: [\d.]+$", re.M), - lambda v, m: f' version: {v.major}.{v.minor}.{v.patch}'), - - ReplacementRule('cvat-cli/src/cvat_cli/version.py', + lambda v, m: f" version: {v.major}.{v.minor}.{v.patch}", + ), + ReplacementRule( + "cvat-cli/src/cvat_cli/version.py", re.compile(r'^VERSION = "[\d.]+"$', re.M), - lambda v, m: f'VERSION = "{v.major}.{v.minor}.{v.patch}"'), - - ReplacementRule('cvat-cli/requirements/base.txt', - re.compile(r'^cvat-sdk~=[\d.]+$', re.M), - lambda v, m: f'cvat-sdk~={v.major}.{v.minor}.{v.patch}'), + lambda v, m: f'VERSION = "{v.major}.{v.minor}.{v.patch}"', + ), + ReplacementRule( + "cvat-cli/requirements/base.txt", + re.compile(r"^cvat-sdk~=[\d.]+$", re.M), + lambda v, m: f"cvat-sdk~={v.major}.{v.minor}.{v.patch}", + ), ] + def get_current_version() -> Version: version_text = CVAT_INIT_PY_PATH.read_text() match = re.search(CVAT_VERSION_PATTERN, version_text) if not match: - raise RuntimeError(f'Failed to find version in {CVAT_INIT_PY_PATH}') + raise RuntimeError(f"Failed to find version in {CVAT_INIT_PY_PATH}") return Version(int(match[1]), int(match[2]), int(match[3]), match[4], int(match[5])) + def main() -> None: - parser = argparse.ArgumentParser(description='Bump CVAT version') + parser = argparse.ArgumentParser(description="Bump CVAT version") action_group = parser.add_mutually_exclusive_group(required=True) - action_group.add_argument('--major', action='store_true', - help='Increment the existing major version by 1') - action_group.add_argument('--minor', action='store_true', - help='Increment the existing minor version by 1') - action_group.add_argument('--patch', action='store_true', - help='Increment the existing patch version by 1') - action_group.add_argument('--prerelease', nargs='?', const='increment', - help='''Increment prerelease version alpha->beta->rc->final, - Also it's possible to pass value explicitly''') - action_group.add_argument('--prerelease_number', action='store_true', - help='Increment prerelease number by 1') - - action_group.add_argument('--current', '--show-current', - action='store_true', help='Display current version') - action_group.add_argument('--verify-current', - action='store_true', help='Check that all version numbers are consistent') - - action_group.add_argument('--set', metavar='X.Y.Z', - help='Set the version to the specified version') + action_group.add_argument( + "--major", action="store_true", help="Increment the existing major version by 1" + ) + action_group.add_argument( + "--minor", action="store_true", help="Increment the existing minor version by 1" + ) + action_group.add_argument( + "--patch", action="store_true", help="Increment the existing patch version by 1" + ) + action_group.add_argument( + "--prerelease", + nargs="?", + const="increment", + help="""Increment prerelease version alpha->beta->rc->final, + Also it's possible to pass value explicitly""", + ) + action_group.add_argument( + "--prerelease_number", action="store_true", help="Increment prerelease number by 1" + ) + + action_group.add_argument( + "--current", "--show-current", action="store_true", help="Display current version" + ) + action_group.add_argument( + "--verify-current", + action="store_true", + help="Check that all version numbers are 
consistent", + ) + + action_group.add_argument( + "--set", metavar="X.Y.Z", help="Set the version to the specified version" + ) args = parser.parse_args() @@ -201,7 +230,7 @@ def main() -> None: version.increment_prerelease_number() elif args.prerelease: - if args.prerelease == 'increment': + if args.prerelease == "increment": version.increment_prerelease() else: version.set_prerelease(args.prerelease) @@ -222,9 +251,9 @@ def main() -> None: assert False, "Unreachable code" if verify_only: - print(f'Verifying that version is {version}...') + print(f"Verifying that version is {version}...") else: - print(f'Bumping version to {version}...') + print(f"Bumping version to {version}...") print() success = True @@ -239,5 +268,6 @@ def main() -> None: else: sys.exit("\nFailed to update one or more files!") -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/docker-compose.external_db.yml b/docker-compose.external_db.yml index decd1e9ed141..8112c59fd4f4 100644 --- a/docker-compose.external_db.yml +++ b/docker-compose.external_db.yml @@ -27,6 +27,7 @@ services: cvat_worker_import: *backend-settings cvat_worker_quality_reports: *backend-settings cvat_worker_webhooks: *backend-settings + cvat_worker_chunks: *backend-settings secrets: postgres_password: diff --git a/docker-compose.yml b/docker-compose.yml index 569e163e9fe5..c13cb5bab74f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: MIT +name: cvat + x-backend-env: &backend-env CVAT_POSTGRES_HOST: cvat_db CVAT_REDIS_INMEM_HOST: cvat_redis_inmem @@ -224,6 +226,22 @@ services: networks: - cvat + cvat_worker_chunks: + container_name: cvat_worker_chunks + image: cvat/server:${CVAT_VERSION:-dev} + restart: always + depends_on: *backend-deps + environment: + <<: *backend-env + NUMPROCS: 2 + command: run worker.chunks + volumes: + - cvat_data:/home/django/data + - cvat_keys:/home/django/keys + - cvat_logs:/home/django/logs + networks: + - cvat + cvat_ui: container_name: cvat_ui image: cvat/ui:${CVAT_VERSION:-dev} @@ -329,6 +347,7 @@ services: cvat_grafana: image: grafana/grafana-oss:10.1.2 + restart: always container_name: cvat_grafana environment: <<: *clickhouse-env diff --git a/helm-chart/templates/cvat_backend/worker_chunks/deployment.yml b/helm-chart/templates/cvat_backend/worker_chunks/deployment.yml new file mode 100644 index 000000000000..74e80b1b185d --- /dev/null +++ b/helm-chart/templates/cvat_backend/worker_chunks/deployment.yml @@ -0,0 +1,96 @@ +{{- $localValues := .Values.cvat.backend.worker.chunks -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-backend-worker-chunks + namespace: {{ .Release.Namespace }} + labels: + app: cvat-app + tier: backend + component: worker-chunks + {{- include "cvat.labels" . | nindent 4 }} + {{- with merge $localValues.labels .Values.cvat.backend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with merge $localValues.annotations .Values.cvat.backend.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ $localValues.replicas }} + strategy: + type: Recreate + selector: + matchLabels: + {{- include "cvat.labels" . | nindent 6 }} + {{- with merge $localValues.labels .Values.cvat.backend.labels }} + {{- toYaml . | nindent 6 }} + {{- end }} + app: cvat-app + tier: backend + component: worker-chunks + template: + metadata: + labels: + app: cvat-app + tier: backend + component: worker-chunks + {{- include "cvat.labels" . 
| nindent 8 }} + {{- with merge $localValues.labels .Values.cvat.backend.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with merge $localValues.annotations .Values.cvat.backend.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "cvat.backend.serviceAccountName" . }} + containers: + - name: cvat-backend + image: {{ .Values.cvat.backend.image }}:{{ .Values.cvat.backend.tag }} + imagePullPolicy: {{ .Values.cvat.backend.imagePullPolicy }} + {{- with merge $localValues.resources .Values.cvat.backend.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + args: ["run", "worker.chunks"] + env: + {{ include "cvat.sharedBackendEnv" . | indent 10 }} + {{- with concat .Values.cvat.backend.additionalEnv $localValues.additionalEnv }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- $probeArgs := list "chunks" -}} + {{- $probeConfig := dict "args" $probeArgs "livenessProbe" $.Values.cvat.backend.worker.livenessProbe -}} + {{ include "cvat.backend.worker.livenessProbe" $probeConfig | indent 10 }} + volumeMounts: + - mountPath: /home/django/data + name: cvat-backend-data + subPath: data + - mountPath: /home/django/logs + name: cvat-backend-data + subPath: logs + {{- with concat .Values.cvat.backend.additionalVolumeMounts $localValues.additionalVolumeMounts }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with merge $localValues.affinity .Values.cvat.backend.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with concat .Values.cvat.backend.tolerations $localValues.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.cvat.backend.defaultStorage.enabled }} + - name: cvat-backend-data + persistentVolumeClaim: + claimName: "{{ .Release.Name }}-backend-data" + {{- end }} + {{- with concat .Values.cvat.backend.additionalVolumes $localValues.additionalVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/helm-chart/test.values.yaml b/helm-chart/test.values.yaml index 73edaa815d70..350cc384c178 100644 --- a/helm-chart/test.values.yaml +++ b/helm-chart/test.values.yaml @@ -14,6 +14,15 @@ cvat: value: cvat.settings.testing_rest worker: import: + replicas: 1 + additionalVolumeMounts: + - mountPath: /home/django/share + name: cvat-backend-data + subPath: share + export: + replicas: 1 + chunks: + replicas: 1 additionalVolumeMounts: - mountPath: /home/django/share name: cvat-backend-data @@ -22,6 +31,8 @@ cvat: additionalEnv: - name: DJANGO_SETTINGS_MODULE value: cvat.settings.testing_rest + annotation: + replicas: 0 # Images are already present in the node imagePullPolicy: Never frontend: diff --git a/helm-chart/values.yaml b/helm-chart/values.yaml index 05d74e906e98..782840f2ed28 100644 --- a/helm-chart/values.yaml +++ b/helm-chart/values.yaml @@ -117,6 +117,16 @@ cvat: additionalEnv: [] additionalVolumes: [] additionalVolumeMounts: [] + chunks: + replicas: 2 + labels: {} + annotations: {} + resources: {} + affinity: {} + tolerations: [] + additionalEnv: [] + additionalVolumes: [] + additionalVolumeMounts: [] utils: replicas: 1 labels: {} @@ -134,8 +144,7 @@ cvat: permissionFix: enabled: true service: - annotations: - traefik.ingress.kubernetes.io/service.sticky.cookie: "true" + annotations: {} spec: type: ClusterIP ports: @@ -466,7 +475,7 @@ ingress: ## kubernetes.io/ingress.class: nginx ## annotations: {} - ## @param ingress.className IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## @param ingress.className IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ ## diff --git a/pyproject.toml b/pyproject.toml index 581552a67ebc..528bdc579fcc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,4 +6,15 @@ skip_gitignore = true # align tool behavior with Black [tool.black] line-length = 100 -target-version = ['py38'] +target-version = ['py39'] +extend-exclude = """ +# TODO: get rid of these +^/cvat/apps/( + dataset_manager|dataset_repo|engine|events + |health|iam|lambda_manager|log_viewer + |organizations|webhooks +)/ +| ^/cvat/settings/ +| ^/serverless/ +| ^/utils/dataset_manifest/ +""" diff --git a/rqscheduler.py b/rqscheduler.py index 82b7499baf89..5ae76e64a7f0 100644 --- a/rqscheduler.py +++ b/rqscheduler.py @@ -9,5 +9,5 @@ from rq_scheduler.scripts import rqscheduler -if __name__ == '__main__': +if __name__ == "__main__": rqscheduler.main() diff --git a/serverless/deploy_cpu.sh b/serverless/deploy_cpu.sh index 03d6f17bad67..1e3834edbd99 100755 --- a/serverless/deploy_cpu.sh +++ b/serverless/deploy_cpu.sh @@ -25,7 +25,10 @@ do echo "Deploying $func_rel_path function..." nuctl deploy --project-name cvat --path "$func_root" \ - --file "$func_config" --platform local + --file "$func_config" --platform local \ + --env CVAT_FUNCTIONS_REDIS_HOST=cvat_redis_ondisk \ + --env CVAT_FUNCTIONS_REDIS_PORT=6666 \ + --platform-config '{"attributes": {"network": "cvat_cvat"}}' done nuctl get function --platform local diff --git a/serverless/deploy_gpu.sh b/serverless/deploy_gpu.sh index c813a8232ad4..49d71ff352c3 100755 --- a/serverless/deploy_gpu.sh +++ b/serverless/deploy_gpu.sh @@ -17,7 +17,10 @@ do echo "Deploying $func_rel_path function..." 
nuctl deploy --project-name cvat --path "$func_root" \ - --file "$func_config" --platform local + --file "$func_config" --platform local \ + --env CVAT_FUNCTIONS_REDIS_HOST=cvat_redis_ondisk \ + --env CVAT_FUNCTIONS_REDIS_PORT=6666 \ + --platform-config '{"attributes": {"network": "cvat_cvat"}}' done nuctl get function --platform local diff --git a/serverless/onnx/WongKinYiu/yolov7/nuclio/function-gpu.yaml b/serverless/onnx/WongKinYiu/yolov7/nuclio/function-gpu.yaml index d9235c458155..a1f3ac70e88d 100644 --- a/serverless/onnx/WongKinYiu/yolov7/nuclio/function-gpu.yaml +++ b/serverless/onnx/WongKinYiu/yolov7/nuclio/function-gpu.yaml @@ -95,7 +95,7 @@ spec: eventTimeout: 30s build: image: cvat.onnx.wongkinyiu.yolov7:latest-gpu - baseImage: nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04 + baseImage: nvidia/cuda:12.6.3-cudnn-runtime-ubuntu22.04 directives: preCopy: - kind: WORKDIR value: /opt/nuclio - kind: RUN - value: pip install onnxruntime-gpu=='1.16.*' opencv-python-headless pillow pyyaml + value: pip install onnxruntime-gpu=='1.20.*' opencv-python-headless pillow pyyaml - kind: WORKDIR value: /opt/nuclio - kind: RUN diff --git a/site/build_docs.py b/site/build_docs.py index 25af0b0e8f82..a01c437ae64c 100755 --- a/site/build_docs.py +++ b/site/build_docs.py @@ -10,7 +10,7 @@ import subprocess import tempfile from pathlib import Path -from typing import Dict, Optional +from typing import Optional import git import toml @@ -98,7 +98,7 @@ def run_npm_install(): def run_hugo( destination_dir: os.PathLike, *, - extra_env_vars: Dict[str, str] = None, + extra_env_vars: dict[str, str] = None, executable: Optional[str] = "hugo", ): extra_kwargs = {} @@ -157,9 +157,7 @@ def validate_env(): try: subprocess.run([hugo, "version"], capture_output=True) # nosec except (subprocess.CalledProcessError, FileNotFoundError) as ex: - raise Exception( - f"Failed to run '{hugo}', please make sure it exists." - ) from ex + raise Exception(f"Failed to run '{hugo}', please make sure it exists.") from ex if __name__ == "__main__": diff --git a/site/content/en/docs/administration/advanced/analytics.md b/site/content/en/docs/administration/advanced/analytics.md index 36425c341a32..6d26e1a75cb3 100644 --- a/site/content/en/docs/administration/advanced/analytics.md +++ b/site/content/en/docs/administration/advanced/analytics.md @@ -18,6 +18,10 @@ and enhance user satisfaction. CVAT analytics are available from the top menu. +Superusers and users with the administrator role have access to analytics. +Permission to access analytics can also be granted when editing a user +on the admin page via the `Has access to analytics` checkbox. + ![CVAT Analytics](/images/analytics_menu.jpg) > Note: CVAT analytics and monitoring are available only for on-prem solution. 
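The `Dict[str, str]` to `dict[str, str]` change in `site/build_docs.py` above is enabled by the move to Python 3.9 (see the `target-version = ['py39']` and `python-version: '3.9'` updates elsewhere in this patch): PEP 585 makes the built-in container types usable as generics directly. A minimal sketch of the two styles, with an illustrative function that is not part of the repository:

```python
from typing import Dict, Optional  # Dict is only needed on Python < 3.9


# Pre-3.9 style: generic aliases are imported from the typing module.
def run_tool_legacy(extra_env_vars: Optional[Dict[str, str]] = None) -> None:
    print(extra_env_vars or {})


# Python 3.9+ style (PEP 585): built-in types are subscriptable,
# so dict[str, str] replaces typing.Dict[str, str].
def run_tool(extra_env_vars: Optional[dict[str, str]] = None) -> None:
    print(extra_env_vars or {})


run_tool({"HUGO_ENV": "production"})
```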
@@ -65,7 +69,7 @@ docker compose up -d ### Ports settings If you cannot access analytics on -development environnement, +development environment, see {{< ilink "/docs/contributing/development-environment#cvat-analytics-ports" "Analytics Ports" >}} ### Events log structure @@ -131,6 +135,12 @@ Server events: - `call:function` +- `create:membership`, `update:membership`, `delete:membership` + +- `create:webhook`, `update:webhook`, `delete:webhook` + +- `create:invitation`, `delete:invitation` + Client events: - `load:cvat` diff --git a/site/content/en/docs/administration/advanced/backup_guide.md b/site/content/en/docs/administration/advanced/backup_guide.md index 2ebac212420d..b773e5e24681 100644 --- a/site/content/en/docs/administration/advanced/backup_guide.md +++ b/site/content/en/docs/administration/advanced/backup_guide.md @@ -20,7 +20,7 @@ Docker volumes are used to store all CVAT data: - `cvat_keys`: used to store the [Django secret key](https://docs.djangoproject.com/en/4.2/ref/settings/#std-setting-SECRET_KEY). Mounted into `cvat` container by `/home/django/keys` path. -- `cvat_logs`: used to store logs of CVAT backend processes managed by supervisord. +- `cvat_logs`: used to store logs of CVAT backend processes managed by the supervisord service. Mounted into `cvat` container by `/home/django/logs` path. - `cvat_events_db`: this volume is used to store Clickhouse database files. diff --git a/site/content/en/docs/administration/advanced/custom_certificates.md b/site/content/en/docs/administration/advanced/custom_certificates.md index 3ea9367974f9..780602a816f1 100644 --- a/site/content/en/docs/administration/advanced/custom_certificates.md +++ b/site/content/en/docs/administration/advanced/custom_certificates.md @@ -48,7 +48,7 @@ tls: keyFile: /certs/key.pem ``` -Edit the `docker-compose.https.yml` file and change the traefik servise configuration as follows: +Edit the `docker-compose.https.yml` file and change the traefik service configuration as follows: ```yaml traefik: diff --git a/site/content/en/docs/administration/advanced/ldap.md b/site/content/en/docs/administration/advanced/ldap.md index c1b6be282f26..c57824d13fa3 100644 --- a/site/content/en/docs/administration/advanced/ldap.md +++ b/site/content/en/docs/administration/advanced/ldap.md @@ -100,9 +100,6 @@ AUTHENTICATION_BACKENDS += ['django_auth_ldap.backend.LDAPBackend'] AUTH_LDAP_ADMIN_GROUPS = [ 'CN=CVAT Admins,%s' % _BASE_DN, ] -AUTH_LDAP_BUSINESS_GROUPS = [ - 'CN=CVAT Managers,%s' % _BASE_DN, -] AUTH_LDAP_WORKER_GROUPS = [ 'CN=CVAT Workers,%s' % _BASE_DN, ] @@ -112,7 +109,6 @@ AUTH_LDAP_USER_GROUPS = [ DJANGO_AUTH_LDAP_GROUPS = { "admin": AUTH_LDAP_ADMIN_GROUPS, - "business": AUTH_LDAP_BUSINESS_GROUPS, "user": AUTH_LDAP_USER_GROUPS, "worker": AUTH_LDAP_WORKER_GROUPS, } @@ -181,9 +177,6 @@ AUTHENTICATION_BACKENDS += ['django_auth_ldap.backend.LDAPBackend'] AUTH_LDAP_ADMIN_GROUPS = [ 'CN=cvat_admins,CN=Groups,%s' % _BASE_DN, ] -AUTH_LDAP_BUSINESS_GROUPS = [ - 'CN=cvat_managers,CN=Groups,%s' % _BASE_DN, -] AUTH_LDAP_WORKER_GROUPS = [ 'CN=cvat_workers,CN=Groups,%s' % _BASE_DN, ] @@ -193,7 +186,6 @@ AUTH_LDAP_USER_GROUPS = [ DJANGO_AUTH_LDAP_GROUPS = { "admin": AUTH_LDAP_ADMIN_GROUPS, - "business": AUTH_LDAP_BUSINESS_GROUPS, "user": AUTH_LDAP_USER_GROUPS, "worker": AUTH_LDAP_WORKER_GROUPS, } diff --git a/site/content/en/docs/administration/basics/admin-account.md b/site/content/en/docs/administration/basics/admin-account.md index 08182f80a22b..bb72a99af89b 100644 --- 
a/site/content/en/docs/administration/basics/admin-account.md +++ b/site/content/en/docs/administration/basics/admin-account.md @@ -11,7 +11,7 @@ The user you register by default does not have full permissions on the instance, so you must create a superuser. The superuser can use [Django administration panel](http://localhost:8080/admin) to assign groups (roles) to other users. -
Available roles are: user (default), admin, business, worker. +
Available roles are: user (default), admin, worker. ### Prerequisites diff --git a/site/content/en/docs/administration/basics/installation.md b/site/content/en/docs/administration/basics/installation.md index d54d818b17ca..9b864b99eac2 100644 --- a/site/content/en/docs/administration/basics/installation.md +++ b/site/content/en/docs/administration/basics/installation.md @@ -460,7 +460,7 @@ export CVAT_HOST= ### Share path You can use shared storage for uploading data when you create a task. -To do that, you must mount the shared storage to the CVAT docker container. Example of +To do that, you need to mount the shared storage to the CVAT docker container. Example of docker-compose.override.yml for this purpose: ```yml @@ -477,6 +477,9 @@ services: cvat_worker_annotation: volumes: - cvat_share:/home/django/share:ro + cvat_worker_chunks: + volumes: + - cvat_share:/home/django/share:ro volumes: cvat_share: diff --git a/site/content/en/docs/api_sdk/cli/_index.md b/site/content/en/docs/api_sdk/cli/_index.md index f17d712717b2..ffa5be80676b 100644 --- a/site/content/en/docs/api_sdk/cli/_index.md +++ b/site/content/en/docs/api_sdk/cli/_index.md @@ -7,20 +7,27 @@ description: '' ## Overview -A simple command line interface for working with CVAT tasks. At the moment it +A simple command line interface for working with CVAT. At the moment it implements a basic feature set but may serve as the starting point for a more comprehensive CVAT administration tool in the future. -Overview of functionality: +The following subcommands are supported: -- Create a new task (supports name, bug tracker, project, labels JSON, local/share/remote files) -- Delete tasks (supports deleting a list of task IDs) -- List all tasks (supports basic CSV or JSON output) -- Download JPEG frames (supports a list of frame IDs) -- Dump annotations (supports all formats via format string) -- Upload annotations for a task in the specified format (e.g. 'YOLO ZIP 1.0') -- Export and download a whole task -- Import a task +- Projects: + - `create` - create a new project + - `delete` - delete projects + - `ls` - list all projects + +- Tasks: + - `create` - create a new task + - `create-from-backup` - create a task from a backup file + - `delete` - delete tasks + - `ls` - list all tasks + - `frames` - download frames from a task + - `export-dataset` - export a task as a dataset + - `import-dataset` - import annotations into a task from a dataset + - `backup` - back up a task + - `auto-annotate` - automatically annotate a task using a local function ## Installation @@ -30,55 +37,37 @@ To install an [official release of CVAT CLI](https://pypi.org/project/cvat-cli/) pip install cvat-cli ``` -We support Python versions 3.8 and higher. +We support Python versions 3.9 and higher. ## Usage -You can get help with `cvat-cli --help`. +The general form of a CLI command is: -``` -usage: cvat-cli [-h] [--version] [--insecure] [--auth USER:[PASS]] [--server-host SERVER_HOST] - [--server-port SERVER_PORT] [--organization SLUG] [--debug] - {create,delete,ls,frames,dump,upload,export,import,auto-annotate} ... - -Perform common operations related to CVAT tasks. - -positional arguments: - {create,delete,ls,frames,dump,upload,export,import,auto-annotate} - -options: - -h, --help show this help message and exit - --version show program's version number and exit - --insecure Allows to disable SSL certificate check - --auth USER:[PASS] defaults to the current user and supports the PASS environment variable or password - prompt (default user: ...). 
- --server-host SERVER_HOST - host (default: localhost) - --server-port SERVER_PORT - port (default: 80 for http and 443 for https connections) - --organization SLUG, --org SLUG - short name (slug) of the organization to use when listing or creating resources; set - to blank string to use the personal workspace (default: list all accessible objects, - create in personal workspace) - --debug show debug output +```console +$ cvat-cli <common options> <resource> <action> <options> ``` -You can get help for each positional argument, e.g. `ls`: +where: -```bash -cvat-cli ls -h -``` -``` -usage: cvat-cli ls [-h] [--json] +- `<common options>` are options shared between all subcommands; +- `<resource>` is a CVAT resource, such as `task`; +- `<action>` is the action to do with the resource, such as `create`; +- `<options>` are any options specific to a particular resource and action. -List all CVAT tasks in simple or JSON format. +You can list available subcommands and options using the `--help` option: -optional arguments: - -h, --help show this help message and exit - --json output JSON data ``` +$ cvat-cli --help # get help on available common options and resources +$ cvat-cli <resource> --help # get help on actions for the given resource +$ cvat-cli <resource> <action> --help # get help on action-specific options +``` + +The CLI implements alias subcommands for some task actions, so that, +for example, `cvat-cli ls` works the same way as `cvat-cli task ls`. These +aliases are provided for backwards compatibility and are deprecated. +Use the `task <action>` form instead. -## Examples +## Examples - tasks ### Create @@ -108,30 +97,30 @@ by using the {{< ilink "/docs/manual/basics/create_an_annotation_task#labels" "l - Create a task named "new task" on the default server "localhost:8080", labels from the file "labels.json" and local images "file1.jpg" and "file2.jpg", the task will be created as current user: ```bash - cvat-cli create "new task" --labels labels.json local file1.jpg file2.jpg + cvat-cli task create "new task" --labels labels.json local file1.jpg file2.jpg ``` - Create a task named "task 1" on the server "example.com" labels from the file "labels.json" and local image "image1.jpg", the task will be created as user "user-1": ```bash - cvat-cli --server-host example.com --auth user-1 create "task 1" \ + cvat-cli --server-host example.com --auth user-1 task create "task 1" \ --labels labels.json local image1.jpg ``` - Create a task named "task 1" on the default server, with labels from "labels.json" and local image "file1.jpg", as the current user, in organization "myorg": ```bash - cvat-cli --org myorg create "task 1" --labels labels.json local file1.jpg + cvat-cli --org myorg task create "task 1" --labels labels.json local file1.jpg ``` - Create a task named "task 1", labels from the project with id 1 and with a remote video file, the task will be created as user "user-1": ```bash - cvat-cli --auth user-1:password create "task 1" --project_id 1 \ + cvat-cli --auth user-1:password task create "task 1" --project_id 1 \ remote https://github.com/opencv/opencv/blob/master/samples/data/vtest.avi?raw=true ``` - Create a task named "task 1 sort random", with labels "cat" and "dog", with chunk size 8, with sorting-method random, frame step 10, copy the data on the CVAT server, with use zip chunks and the video file will be taken from the shared resource: ```bash - cvat-cli create "task 1 sort random" --labels '[{"name": "cat"},{"name": "dog"}]' --chunk_size 8 \ + cvat-cli task create "task 1 sort random" --labels '[{"name": "cat"},{"name": "dog"}]' --chunk_size 8 \ --sorting-method random --frame_step 10 --copy_data 
--use_zip_chunks share //share/dataset_1/video.avi ``` - Create a task named "task from dataset_1", labels from the file "labels.json", with link to bug tracker, @@ -139,89 +128,89 @@ by using the {{< ilink "/docs/manual/basics/create_an_annotation_task#labels" "l from the file "annotation.xml", the data will be loaded from "dataset_1/images/", the task will be created as user "user-2", and the password will need to be entered additionally: ```bash - cvat-cli --auth user-2 create "task from dataset_1" --labels labels.json \ + cvat-cli --auth user-2 task create "task from dataset_1" --labels labels.json \ --bug_tracker https://bug-tracker.com/0001 --image_quality 75 --annotation_path annotation.xml \ --annotation_format "CVAT 1.1" local dataset_1/images/ ``` - Create a task named "segmented task 1", labels from the file "labels.json", with overlay size 5, segment size 100, with frames 5 through 705, using cache and with a remote video file: ```bash - cvat-cli create "segmented task 1" --labels labels.json --overlap 5 --segment_size 100 \ + cvat-cli task create "segmented task 1" --labels labels.json --overlap 5 --segment_size 100 \ --start_frame 5 --stop_frame 705 --use_cache \ remote https://github.com/opencv/opencv/blob/master/samples/data/vtest.avi?raw=true ``` - Create a task named "task with filtered cloud storage data", with filename_pattern `test_images/*.jpeg` and using the data from the cloud storage resource described in the manifest.jsonl: ```bash - cvat-cli create "task with filtered cloud storage data" --labels '[{"name": "car"}]'\ + cvat-cli task create "task with filtered cloud storage data" --labels '[{"name": "car"}]'\ --use_cache --cloud_storage_id 1 --filename_pattern "test_images/*.jpeg" share manifest.jsonl ``` - Create a task named "task with filtered cloud storage data" using all data from the cloud storage resource described in the manifest.jsonl by specifying filename_pattern `*`: ```bash - cvat-cli create "task with filtered cloud storage data" --labels '[{"name": "car"}]'\ + cvat-cli task create "task with filtered cloud storage data" --labels '[{"name": "car"}]'\ --use_cache --cloud_storage_id 1 --filename_pattern "*" share manifest.jsonl ``` ### Delete -- Delete tasks with id "100", "101", "102" , the command will be executed from "user-1" having delete permissions: +- Delete tasks with IDs "100", "101", "102", the command will be executed from "user-1" having delete permissions: ```bash - cvat-cli --auth user-1:password delete 100 101 102 + cvat-cli --auth user-1:password task delete 100 101 102 ``` ### List - List all tasks: ```bash - cvat-cli ls + cvat-cli task ls ``` - List all tasks in organization "myorg": ```bash - cvat-cli --org myorg ls + cvat-cli --org myorg task ls ``` - Save list of all tasks into file "list_of_tasks.json": ```bash - cvat-cli ls --json > list_of_tasks.json + cvat-cli task ls --json > list_of_tasks.json ``` ### Frames - Save frame 12, 15, 22 from task with id 119, into "images" folder with compressed quality: ```bash - cvat-cli frames --outdir images --quality compressed 119 12 15 22 + cvat-cli task frames --outdir images --quality compressed 119 12 15 22 ``` -### Dump annotation +### Export as a dataset -- Dump annotation task with id 103, in the format `CVAT for images 1.1` and save to the file "output.zip": +- Export annotation task with id 103, in the format `CVAT for images 1.1` and save to the file "output.zip": ```bash - cvat-cli dump --format "CVAT for images 1.1" 103 output.zip + cvat-cli task export-dataset --format "CVAT 
for images 1.1" 103 output.zip ``` -- Dump annotation task with id 104, in the format `COCO 1.0` and save to the file "output.zip": +- Export annotation task with id 104, in the format `COCO 1.0` and save to the file "output.zip": ```bash - cvat-cli dump --format "COCO 1.0" 104 output.zip + cvat-cli task export-dataset --format "COCO 1.0" 104 output.zip ``` -### Upload annotation +### Import annotations from a dataset -- Upload annotation into task with id 105, in the format `CVAT 1.1` from the file "annotation.xml": +- Import annotation into task with id 105, in the format `CVAT 1.1` from the file "annotation.xml": ```bash - cvat-cli upload --format "CVAT 1.1" 105 annotation.xml + cvat-cli task import-dataset --format "CVAT 1.1" 105 annotation.xml ``` -### Export task +### Back up a task -- Export task with id 136 to file "task_136.zip": +- Back up task with id 136 to file "task_136.zip": ```bash - cvat-cli export 136 task_136.zip + cvat-cli task backup 136 task_136.zip ``` -### Import +### Create from backup -- Import task from file "task_backup.zip": +- Create a task from backup file "task_backup.zip": ```bash - cvat-cli import task_backup.zip + cvat-cli task create-from-backup task_backup.zip ``` ### Auto-annotate @@ -271,13 +260,13 @@ It can auto-annotate using AA functions implemented in one of the following ways - Annotate the task with id 137 with the predefined torchvision detection function, which is parameterized: ```bash - cvat-cli auto-annotate 137 --function-module cvat_sdk.auto_annotation.functions.torchvision_detection \ + cvat-cli task auto-annotate 137 --function-module cvat_sdk.auto_annotation.functions.torchvision_detection \ -p model_name=str:fasterrcnn_resnet50_fpn_v2 -p box_score_thresh=float:0.5 ``` - Annotate the task with id 138 with an AA function defined in `my_func.py`: ```bash - cvat-cli auto-annotate 138 --function-file path/to/my_func.py + cvat-cli task auto-annotate 138 --function-file path/to/my_func.py ``` Note that this command does not modify the Python module search path. @@ -289,5 +278,41 @@ if it isn't there already. located in the `my-project` directory, letting it import other modules from that directory. ```bash - PYTHONPATH=path/to/my-project cvat-cli auto-annotate 139 --function-module my_func + PYTHONPATH=path/to/my-project cvat-cli task auto-annotate 139 --function-module my_func + ``` + +## Examples - projects + +### Create + +While creating a project, you may optionally define its labels. +The `project create` command accepts labels in the same format as the `task create` command; +see that command's examples for more information. 
+ +- Create a project named "new project" on the default server "localhost:8080", + with labels from the file "labels.json": + ```bash + cvat-cli project create "new project" --labels labels.json + ``` +- Create a project from a dataset in the COCO format: + ```bash + cvat-cli project create "new project" --dataset_file coco.zip --dataset_format "COCO 1.0" + ``` + +### Delete + +- Delete projects with IDs "100", "101", "102": + ```bash + cvat-cli project delete 100 101 102 + ``` + +### List + +- List all projects: + ```bash + cvat-cli project ls + ``` +- Save list of all projects into file "list_of_projects.json": + ```bash + cvat-cli project ls --json > list_of_projects.json ``` diff --git a/site/content/en/docs/api_sdk/sdk/_index.md b/site/content/en/docs/api_sdk/sdk/_index.md index 4c133a7b0231..e855dadd979f 100644 --- a/site/content/en/docs/api_sdk/sdk/_index.md +++ b/site/content/en/docs/api_sdk/sdk/_index.md @@ -42,13 +42,20 @@ To install an [official release of CVAT SDK](https://pypi.org/project/cvat-sdk/) pip install cvat-sdk ``` -To use the PyTorch adapter, request the `pytorch` extra: +To use the `cvat_sdk.masks` module, request the `masks` extra: + +```bash +pip install "cvat-sdk[masks]" +``` + +To use the PyTorch adapter or the built-in PyTorch-based auto-annotation functions, +request the `pytorch` extra: ```bash pip install "cvat-sdk[pytorch]" ``` -We support Python versions 3.8 and higher. +We support Python versions 3.9 and higher. ## Usage diff --git a/site/content/en/docs/api_sdk/sdk/auto-annotation.md b/site/content/en/docs/api_sdk/sdk/auto-annotation.md index 24e16c7e6218..b50a680c7364 100644 --- a/site/content/en/docs/api_sdk/sdk/auto-annotation.md +++ b/site/content/en/docs/api_sdk/sdk/auto-annotation.md @@ -68,7 +68,12 @@ class TorchvisionDetectionFunction: ] ) - def detect(self, context, image: PIL.Image.Image) -> List[models.LabeledShapeRequest]: + def detect( + self, context: cvataa.DetectionFunctionContext, image: PIL.Image.Image + ) -> list[models.LabeledShapeRequest]: + # determine the threshold for filtering results + conf_threshold = context.conf_threshold or 0 + # convert the input into a form the model can understand transformed_image = [self._transforms(image)] @@ -79,7 +84,8 @@ class TorchvisionDetectionFunction: return [ cvataa.rectangle(label.item(), [x.item() for x in box]) for result in results - for box, label in zip(result['boxes'], result['labels']) + for box, label, score in zip(result["boxes"], result["labels"], result["scores"]) + if score >= conf_threshold ] # log into the CVAT server @@ -112,9 +118,13 @@ that these objects must follow. `detect` must be a function/method accepting two parameters: - `context` (`DetectionFunctionContext`). - Contains information about the current image. - Currently `DetectionFunctionContext` only contains a single field, `frame_name`, - which contains the file name of the frame on the CVAT server. + Contains invocation parameters and information about the current image. + The following fields are available: + + - `frame_name` (`str`). The file name of the frame on the CVAT server. + - `conf_threshold` (`float | None`). The confidence threshold that the function + should use to filter objects. If `None`, the function may apply a default + threshold at its discretion. - `image` (`PIL.Image.Image`). Contains image data. @@ -153,7 +163,7 @@ The same logic is used for sub-label IDs. 
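To make the `detect` protocol described above concrete, here is a minimal sketch of a complete module-based AA function that honors `conf_threshold`. The label name and the hard-coded detection are invented for illustration; the `cvataa` helpers are the ones covered in this document:

```python
import PIL.Image

import cvat_sdk.auto_annotation as cvataa
from cvat_sdk import models

# Declare the labels the function may output; CVAT matches them by name.
spec = cvataa.DetectionFunctionSpec(
    labels=[cvataa.label_spec("car", 0)],
)


def detect(
    context: cvataa.DetectionFunctionContext, image: PIL.Image.Image
) -> list[models.LabeledShapeRequest]:
    # Fall back to a permissive default when no threshold is supplied.
    conf_threshold = context.conf_threshold or 0

    # A real function would run a model here; this stand-in "detects" one
    # box over the top-left quadrant of the image with fixed confidence.
    detections = [([0, 0, image.width / 2, image.height / 2], 0.9)]

    return [
        cvataa.rectangle(0, box)  # 0 is the label ID declared in spec
        for box, score in detections
        if score >= conf_threshold
    ]
```

A module like this could then be used with `cvat-cli task auto-annotate --function-file`, as shown in the CLI examples earlier.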
### Helper factory functions The CVAT API model types used in the AA function protocol are somewhat unwieldy to work with, -so it's recommented to use the helper factory functions provided by this layer. +so it's recommended to use the helper factory functions provided by this layer. These helpers instantiate an object of their corresponding model type, passing their arguments to the model constructor and sometimes setting some attributes to fixed values. @@ -171,10 +181,23 @@ The following helpers are available for use in `detect`: | Name | Model type | Fixed attributes | |-------------|--------------------------|-------------------------------| | `shape` | `LabeledShapeRequest` | `frame=0` | +| `mask` | `LabeledShapeRequest` | `frame=0`, `type="mask"` | +| `polygon` | `LabeledShapeRequest` | `frame=0`, `type="polygon"` | | `rectangle` | `LabeledShapeRequest` | `frame=0`, `type="rectangle"` | | `skeleton` | `LabeledShapeRequest` | `frame=0`, `type="skeleton"` | | `keypoint` | `SubLabeledShapeRequest` | `frame=0`, `type="points"` | +For `mask`, it is recommended to create the points list using +the `cvat_sdk.masks.encode_mask` function, which will convert a bitmap into a +list in the format that CVAT expects. For example: + +```python +cvataa.mask(my_label, encode_mask( + my_mask, # boolean 2D array, same size as the input image + [x1, y1, x2, y2], # top left and bottom right coordinates of the mask +)) +``` + ## Auto-annotation driver The `annotate_task` function uses an AA function to annotate a CVAT task. @@ -195,6 +218,9 @@ If you use `allow_unmatched_label=True`, then such labels will be ignored, and any shapes referring to them will be dropped. Same logic applies to sub-label IDs. +It's possible to pass a custom confidence threshold to the function via the +`conf_threshold` parameter. + `annotate_task` will raise a `BadFunctionError` exception if it detects that the function violated the AA function protocol. @@ -244,10 +270,18 @@ The `create` function accepts the following parameters: It also accepts arbitrary additional parameters, which are passed directly to the model constructor. +### `cvat_sdk.auto_annotation.functions.torchvision_instance_segmentation` + +This AA function is analogous to `torchvision_detection`, +except it uses torchvision's instance segmentation models and produces mask +or polygon annotations (depending on the value of `conv_mask_to_poly`). + +Refer to that function's description for usage instructions and parameter information. + ### `cvat_sdk.auto_annotation.functions.torchvision_keypoint_detection` This AA function is analogous to `torchvision_detection`, except it uses torchvision's keypoint detection models and produces skeleton annotations. Keypoints which the model marks as invisible will be marked as occluded in CVAT. -Refer to the previous section for usage instructions and parameter information. +Refer to that function's description for usage instructions and parameter information. diff --git a/site/content/en/docs/api_sdk/sdk/developer-guide.md b/site/content/en/docs/api_sdk/sdk/developer-guide.md index 65047488df34..f4dda3f71475 100644 --- a/site/content/en/docs/api_sdk/sdk/developer-guide.md +++ b/site/content/en/docs/api_sdk/sdk/developer-guide.md @@ -32,15 +32,15 @@ the repository. To get the full package, one needs to generate the missing package files: 1. 
Install the packages: ```bash - pip install cvat-sdk/ - pip install cvat-cli/ + pip install ./cvat-sdk + pip install ./cvat-cli ``` If you want to edit package files, install them with `-e`: ```bash - pip install -e cvat-sdk/ - pip install -e cvat-cli/ + pip install -e ./cvat-sdk + pip install -e ./cvat-cli ``` ## How to edit templates @@ -124,7 +124,7 @@ usage patterns and simpler/faster ways to achieve results. default `/_`. - Server operations have different types for input and output values. - While it can be expected that an endopint with POST/PUT methods available + While it can be expected that an endpoint with POST/PUT methods available (like `create` or `partial_update`) has the same type for input and output (because it looks natural), it also leads to the situation, in which there are lots of read-/write-only fields, and it becomes hard for understanding. diff --git a/site/content/en/docs/api_sdk/sdk/pytorch-adapter.md b/site/content/en/docs/api_sdk/sdk/pytorch-adapter.md index 65dff955e4c6..57090e0c38d7 100644 --- a/site/content/en/docs/api_sdk/sdk/pytorch-adapter.md +++ b/site/content/en/docs/api_sdk/sdk/pytorch-adapter.md @@ -203,7 +203,7 @@ You can change this using the following constructor parameters: will be included. Both parameters can be set, -in which case tasks must fulfull both criteria to be included. +in which case tasks must fulfill both criteria to be included. ### Caching diff --git a/site/content/en/docs/contributing/development-environment.md b/site/content/en/docs/contributing/development-environment.md index cf2b1c01d713..31fb2f755c7a 100644 --- a/site/content/en/docs/contributing/development-environment.md +++ b/site/content/en/docs/contributing/development-environment.md @@ -80,7 +80,7 @@ description: 'Installing a development environment for different operating syste python3 -m venv .env . .env/bin/activate pip install -U pip wheel setuptools - pip install -r cvat/requirements/development.txt + pip install -r cvat/requirements/development.txt -r dev/requirements.txt ``` Note that the `.txt` files in the `cvat/requirements` directory @@ -168,6 +168,7 @@ description: 'Installing a development environment for different operating syste ```bash python manage.py migrate python manage.py collectstatic + python manage.py syncperiodicjobs python manage.py createsuperuser ``` diff --git a/site/content/en/docs/enterprise/immediate-feedback.md b/site/content/en/docs/enterprise/immediate-feedback.md index 76bde8df99e5..e336e7815e21 100644 --- a/site/content/en/docs/enterprise/immediate-feedback.md +++ b/site/content/en/docs/enterprise/immediate-feedback.md @@ -2,49 +2,80 @@ title: 'Immediate job feedback' linkTitle: 'Immediate job feedback' weight: 5 -description: 'This feature provides annotators with general feedback on their performance in a job.' +description: 'Quick responses about job annotation quality' --- -When an annotator finishes a job, a dialog is displayed showing the quality of their annotations. -The annotator can either agree or disagree with the feedback. +## Overview + +The basic idea behind this feature is to provide annotators with quick feedback on their +performance in a job. When an annotator finishes a job, a dialog is displayed showing the +quality of their annotations. The annotator can either agree or disagree with the feedback. If they disagree, they have the option to re-annotate the job and request feedback again. -However, feedback is only available a limited number of times, as specified in the task's quality settings. 
-To ensure transparency with the annotator, the immediate feedback shows the collected score and -the minimum required score. -Immediate feedback settings, such as `Target metric`, `Target metric threshold`, -`Max validations per job` and others, can be configured on the quality settings page: - +To ensure transparency with the annotator, the immediate feedback shows the computed score and +the minimum required score. Information about the specific errors or frames that have errors is +not available to annotators. + +Feedback is only available a limited number of times for each assignment, to prevent +annotators from revealing the Ground Truth. This is controlled by a configurable parameter, so +it can be adjusted to the requirements of each project. + +## How to configure + +Immediate feedback settings, such as `Target metric`, `Target metric threshold`, +`Max validations per job` and others, can be configured on the quality settings page. + +This feature is considered enabled if `Max validations per job` is above 0. You can change +the parameters at any time. + +> **Note**: This feature requires a configured validation set in the task. Read more +> in the +> {{< ilink "/docs/manual/basics/quality-control#how-to-enable-quality-control" "quality overview" >}} +> section or in the +{{< ilink "/docs/manual/advanced/analytics-and-monitoring/auto-qa#configuring-quality-estimation" "full guide" >}}. + +1. Open the task **Actions** menu > **Quality control** > **Settings** + + ![Configure job validations](/images/immediate-feedback-quality-settings.png) + +2. Set the `Target metric` and `Target metric threshold` values to what is required in your project. +3. Set **Max validations per job** to above zero. 3 is a good starting number. +4. Save the updated settings - +## How to receive feedback + +1. Assign an annotator to an annotation job +2. Annotate the job +3. Mark the job finished using the corresponding button in the menu +4. Once the job is completed, you'll see the job validation dialog + + + +Each assignee gets no more than the specified number of validation attempts. + +> **Note**: this functionality is only available in regular annotation jobs. For instance, +> it's not possible to use it in Ground Truth jobs. ### Available feedbacks There are three types of feedbacks available for different cases: - Accepted -- Rejected, but can be adjusted +- Rejected, with an option to fix mistakes - Finally rejected when the number of attempts is exhausted -Notes: +## Additional details > Immediate feedback has a default timeout of 20 seconds. -Feedback may be unavailable for large jobs or when there are too many immediate feedback requests. -In this case annotators do not see any feedback dialogs. +> Feedback may be unavailable for large jobs or when there are too many immediate feedback requests. +> In this case annotators do not see any feedback dialogs and annotate jobs as +> if the feature was disabled. -> The number of attempts does not decrease for staff members who have access to a job with ground truth annotations. +> The number of attempts does not decrease for staff members who have access to a job +> with ground truth annotations. For instance, if you're trying to test this feature as the task +> owner, you may be confused when the number of attempts doesn't decrease. > The number of attempts resets when the job assignee is updated. - -Requirements: -1. 
The task is configured with a Ground Truth job that has been annotated, -moved to the acceptance stage, and is in the completed state. -2. The current job is in the annotation stage. -3. The current job is a regular annotation job. Immediate feedback is not available for Ground Truth jobs -4. The `Max validations per job` setting has been configured on the quality settings page. - - - diff --git a/site/content/en/docs/enterprise/segment-anything-2-tracker.md b/site/content/en/docs/enterprise/segment-anything-2-tracker.md new file mode 100644 index 000000000000..86c7018f7f41 --- /dev/null +++ b/site/content/en/docs/enterprise/segment-anything-2-tracker.md @@ -0,0 +1,101 @@ +--- +title: 'Segment Anything 2 Tracker' +linkTitle: 'Segment Anything 2 Tracker' +weight: 6 +description: 'Accelerating video labeling using the SAM2 model' +--- + +## Overview + +Segment Anything 2 is a segmentation model that allows fast and precise selection of any object in videos or images. +For enterprise customers, this model can be installed in their self-hosted solution. To ensure a good experience, +it is strongly recommended to deploy the model using a GPU. Although it is possible to use a CPU-based version, +it generally performs much slower and is suitable only for handling a single parallel request. Unlike a regular +tracking model, the SAM 2 tracker is implemented as an annotation action. This allows it to be applied to existing +objects (polygons and masks) to track them forward for a specified number of frames. + +## How to install + +> **Note**: This feature is not available in the community CVAT version. + +> **Note**: This feature requires the enhanced actions UI plugin, which is enabled by default. +Usually, no additional steps are necessary. + +### Docker + +You can use existing scripts from the community repository +(`./serverless/deploy_cpu.sh` or `./serverless/deploy_gpu.sh`). +To deploy the feature, simply run: + +```sh +./serverless/deploy_gpu.sh "path/to/the/function" +``` + +### Kubernetes + +- You need to deploy the Nuclio function manually. +Note that this function requires Redis storage configured to keep the tracking state. +You may use the same storage as `cvat_redis_ondisk` uses. +When running the `nuclio deploy` command, make sure to provide the necessary arguments. +The minimal command is: + +```sh +nuctl deploy "path/to/the/function" + --env CVAT_FUNCTIONS_REDIS_HOST="<redis host>" + --env CVAT_FUNCTIONS_REDIS_PORT="<redis port>" + --env CVAT_FUNCTIONS_REDIS_PASSWORD="<redis password>" # if applicable +``` + +## Running on an object + +The tracker can be applied to any polygons and masks. To run the tracker on an object, open the object menu and click +"Run annotation action". + + + +Alternatively, you can use a hotkey: select the object and press **Ctrl + E** (default shortcut). +When the modal opens, choose **Segment Anything 2: Tracker** in the "Select action" list: + + + +Specify the **target frame** until which you want the object to be tracked, +then click the **Run** button to start tracking. The process begins and may take some time to complete. +The duration depends on the inference device and the number of frames where the object will be tracked. + + + +Once the process is complete, the modal window closes. You can review how the object was tracked. +If you notice that the tracked shape deteriorates at some point, +you can adjust the object coordinates and run the tracker again from that frame. 
+ +## Running on multiple objects + +Instead of tracking each object individually, you can track multiple objects +simultaneously. To do this, click the **Menu** button in the annotation view and select the **Run Actions** option: + + + +Alternatively, you can use a hotkey: just press **Ctrl + E** (default shortcut) when there are no objects selected. +This opens the actions modal. In this case, the tracker will be applied to all visible objects of suitable types +(polygons and masks). In the action list of the opened modal, select **Segment Anything 2: Tracker**: + + + +Specify the **target frame** until which you want the objects to be tracked, +then click the **Run** button to start tracking. The process begins and may take some time to complete. +The duration depends on the inference device, the number of simultaneously tracked objects, +and the number of frames where the object will be tracked. + + + +Once the process finishes, you may close the modal and review how the objects were tracked. +If you notice that the tracked shapes deteriorate, you can adjust their +coordinates and run the tracker again from that frame (for a single object or for many objects). + + +## Tracker parameters + +- **Target frame**: Objects will be tracked up to this frame. Must be greater than the current frame. +- **Convert polygon shapes to tracks**: When enabled, all visible polygon shapes in the current frame will be converted +to tracks before tracking begins. Use this option if you need tracks as the final output but started with shapes, +produced, for example, by interactors (e.g., SAM2). diff --git a/site/content/en/docs/enterprise/shapes-converter.md b/site/content/en/docs/enterprise/shapes-converter.md index fbceb757c3f1..43caf0e122eb 100644 --- a/site/content/en/docs/enterprise/shapes-converter.md +++ b/site/content/en/docs/enterprise/shapes-converter.md @@ -50,7 +50,7 @@ With the following fields: If unsaved changes are detected, a prompt will advise to save these changes to avoid any potential loss of data. -- **Disabу auto-save:** Prior to running the annotation action, disabling the auto-save feature +- **Disable auto-save:** Prior to running the annotation action, disabling the auto-save feature is advisable. A notification will suggest this action if auto-save is currently active. - **Committing changes:** Changes applied during the annotation session diff --git a/site/content/en/docs/enterprise/social-accounts-configuration.md b/site/content/en/docs/enterprise/social-accounts-configuration.md index 0a521ef9a905..e2fb506d986b 100644 --- a/site/content/en/docs/enterprise/social-accounts-configuration.md +++ b/site/content/en/docs/enterprise/social-accounts-configuration.md @@ -2,7 +2,7 @@ title: 'Social auth configuration' linkTitle: 'Social auth configuration' weight: 3 -description: 'Social accounts authentication for Self-Hosted solution' +description: 'Social accounts authentication for a Self-Hosted solution' --- > **Note:** This is a paid feature available for [Enterprise clients](https://www.cvat.ai/pricing/on-prem). @@ -51,7 +51,7 @@ To enable authentication, do the following: configure: **Application name**, **Authorized JavaScript origins**, **Authorized redirect URIs**.
For example, if you plan to deploy a CVAT instance on `https://localhost:8080`, add `https://localhost:8080` to authorized JS origins and `https://localhost:8080/api/auth/social/google/login/callback/` to redirect URIs. -8. Create conпiguration file in CVAT: +8. Create configuration file in CVAT: 1. Create the `auth_config.yml` file with the following content: @@ -81,7 +81,7 @@ There are 2 basic steps to enable GitHub account authentication.
For more information, see [Creating an OAuth App](https://docs.github.com/en/developers/apps/building-oauth-apps/creating-an-oauth-app) 3. Fill in the name field, set the homepage URL (for example: `https://localhost:8080`), and authentication callback URL (for example: `https://localhost:8080/api/auth/social/github/login/callback/`). -4. Create conпiguration file in CVAT: +4. Create configuration file in CVAT: 1. Create the `auth_config.yml` file with the following content: diff --git a/site/content/en/docs/faq.md b/site/content/en/docs/faq.md index 7fef81c88d92..99db9a8b9248 100644 --- a/site/content/en/docs/faq.md +++ b/site/content/en/docs/faq.md @@ -84,6 +84,9 @@ services: cvat_worker_annotation: volumes: - cvat_share:/home/django/share:ro + cvat_worker_chunks: + volumes: + - cvat_share:/home/django/share:ro volumes: cvat_share: diff --git a/site/content/en/docs/manual/advanced/analytics-and-monitoring/auto-qa.md b/site/content/en/docs/manual/advanced/analytics-and-monitoring/auto-qa.md index e2642f4d59ee..21ebd2d99087 100644 --- a/site/content/en/docs/manual/advanced/analytics-and-monitoring/auto-qa.md +++ b/site/content/en/docs/manual/advanced/analytics-and-monitoring/auto-qa.md @@ -1,167 +1,351 @@ --- -title: 'Automated QA, Review & Honeypot' +title: 'Automated QA, Review & Honeypots' linkTitle: 'Automated QA' weight: 1 description: 'Guidelines for assessing annotation quality in CVAT automatically' --- -In CVAT, it's possible to evaluate the quality of annotation through -the creation of a **Ground truth** job, referred to as a Honeypot. -To estimate the task quality, CVAT compares all other jobs in the task against the -established **Ground truth** job, and calculates annotation quality -based on this comparison. +In CVAT, it's possible to evaluate the quality of annotation through the creation +of a validation subset of images. To estimate the task quality, CVAT compares +all other jobs in the task against the established **Ground truth** job, +and calculates annotation quality based on this comparison. > **Note** that quality estimation only supports > 2d tasks. It supports all the annotation types except 2d cuboids. -> **Note** that tracks are considered separate shapes -> and compared on a per-frame basis with other tracks and shapes. - -See: - -- [Ground truth job](#ground-truth-job) -- [Managing Ground Truth jobs: Import, Export, and Deletion](#managing-ground-truth-jobs-import-export-and-deletion) - - [Import](#import) - - [Export](#export) - - [Delete](#delete) -- [Assessing data quality with Ground truth jobs](#assessing-data-quality-with-ground-truth-jobs) - - [Quality data](#quality-data) - - [Annotation quality settings](#annotation-quality-settings) - - [GT conflicts in the CVAT interface](#gt-conflicts-in-the-cvat-interface) -- [Annotation quality \& Honeypot video tutorial](#annotation-quality--honeypot-video-tutorial) - -## Ground truth job - -A **Ground truth** job is a way to tell CVAT where to store -and get the "correct" annotations for task quality estimation. - -To estimate task quality, you need to -create a **Ground truth** job in the task, -and annotate it. You don’t need to -annotate the whole dataset twice, -the annotation quality of a small part of -the data shows the quality of annotation for -the whole dataset. - -For the quality assurance to function correctly, the **Ground truth** job must -have a small portion of the task frames and the frames must be chosen randomly. 
-Depending on the dataset size and task complexity, +> **Note** that quality estimation is currently available for tasks and jobs. +> Quality estimation in projects is not supported. + +CVAT has the following features for automated quality control of annotations: +- Validation set configuration for a task +- Job validation on job finish ("{{< ilink "/docs/enterprise/immediate-feedback" "Immediate feedback" >}}") +- Review mode for problems found +- Quality analytics + +## Basics + +There are several approaches to quality estimation used in the industry. In CVAT, +we can use a method known as Ground Truth or Honeypots. The method assumes there are +Ground Truth annotations for images in the dataset. This method is statistical, +which means that we can use only a small portion of the whole dataset to +estimate quality on the full dataset, so we don't need to annotate the whole dataset twice. +Here we assume that the images in the dataset are similar (represent the same task). + +We will call the validation portion of the whole dataset (or a task in CVAT) a validation set. +In practice, it is typically expected that annotations in the validation set are carefully +validated and curated. This makes them more expensive: creating them might require +expert annotators or several iterations of annotation and validation, so it's +desirable to keep the validation set reasonably small. At the same time, it must be representative +enough to provide reliable estimations. To achieve this, it's advised that the validation set +images are sampled randomly and independently from the full dataset. +That is, for the quality assurance to function correctly, the validation set must +have some portion of the task frames, and the frames must be chosen randomly. + +Depending on the dataset size, data variance, and task complexity, **5-15% of the data is typically good enough** for quality estimation, -while keeping extra annotation overhead acceptable. +while keeping extra annotation overhead for the Ground Truth acceptable. For example, in a typical **task with 2000 frames**, selecting **just 5%**, which is 100 extra frames to annotate, **is enough** to estimate the annotation quality. If the task contains **only 30 frames**, it's advisable to -select **8-10 frames**, which is **about 30%**. +select **8-10 frames**, which is **about 30%**. It is more than 15%, +but in the case of smaller datasets, we need more samples to estimate quality reliably, +as data variance is higher. -It is more than 15% but in the case of smaller datasets, -we need more samples to estimate quality reliably. +## Ground truth jobs -To create a **Ground truth** job, do the following: +A **Ground Truth job** (GT job) is a way to represent the validation set in a CVAT task. +This job is similar to regular annotation jobs - you can edit the annotations manually, +use auto-annotation features, and import annotations in this job. There can be no more +than one Ground Truth job in a task. -1. Create a {{< ilink "/docs/manual/basics/create_an_annotation_task" "task" >}}, and open the task page. +To enable quality estimation in a task, you need to create a Ground truth job in the task, +annotate it, switch the job stage to `acceptance`, and set the job state to `completed`. +Once the Ground Truth job is configured, CVAT will start using this job for quality estimation. + +Read more about Ground Truth management [here](#ground-truth-job-management). 
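Ground Truth jobs can also be created programmatically. Below is a minimal sketch using the CVAT SDK; the host, credentials, task ID, and frame numbers are placeholder values, and the request fields should be double-checked against the Jobs API reference for your server version:

```python
from cvat_sdk import make_client
from cvat_sdk.api_client import models

# Placeholder host, credentials, and task ID - replace with your own.
with make_client("http://localhost:8080", credentials=("user", "password")) as client:
    client.api_client.jobs_api.create(
        models.JobWriteRequest(
            type="ground_truth",
            task_id=42,
            # Hand-picked validation frames; alternatively, use
            # frame_selection_method="random_uniform" together with
            # frame_count and seed to mirror the UI's random selection.
            frame_selection_method="manual",
            frames=[0, 12, 40, 77, 104],
        )
    )
```

A job created this way still has to be annotated and switched to the `acceptance` stage and `completed` state before CVAT starts using it for quality estimation.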
+ +## Configuring quality estimation + +Quality estimation is configured on the Task level. + +{{< tabpane text=true >}} + +{{%tab header="In a new task" %}} +1. Go to the {{< ilink "/docs/manual/basics/create_an_annotation_task" "task creation" >}} page +2. Configure basic and advanced parameters according to your requirements, and attach a dataset to be annotated +3. Scroll down to the **Quality Control** section below +4. Select one of the [validation modes](#validation-modes) available + + ![Create task with validation mode](/images/honeypot09.jpg) + +5. Create the task and open the task page +6. Upload or create Ground Truth annotations in the Ground Truth job in the task +7. Switch the Ground Truth job into the `acceptance` stage and `completed` state + + ![Set job status](/images/honeypot10.jpg) +{{% /tab %}} + +{{%tab header="In an existing task" %}} +> For already existing tasks only the Ground Truth validation mode is available. If you want +> to use Honeypots for your task, you will need to recreate the task. + +1. Open the task page 2. Click **+**. - ![Create job](/images/honeypot01.jpg) + ![Create job](/images/honeypot01.jpg) 3. In the **Add new job** window, fill in the following fields: - ![Add new job](/images/honeypot02.jpg) + ![Configure job parameters](/images/honeypot02.jpg) - - **Job type**: Use the default parameter **Ground truth**. - - **Frame selection method**: Use the default parameter **Random**. - - **Quantity %**: Set the desired percentage of frames for the **Ground truth** job. -
**Note** that when you use **Quantity %**, the **Frames** field will be autofilled. - - **Frame count**: Set the desired number of frames for the "ground truth" job. -
**Note** that when you use **Frames**, the **Quantity %** field will be will be autofilled. - - **Seed**: (Optional) If you need to make the random selection reproducible, specify this number. - It can be any integer number, the same value will yield the same random selection (given that the - frame number is unchanged).
**Note** that if you want to use a - custom frame sequence, you can do this using the server API instead, - see [Jobs API #create](https://docs.cvat.ai/docs/api_sdk/sdk/reference/apis/jobs-api/#create). +- **Job type**: Use the default parameter **Ground truth**. +- **Frame selection method**: Use the default parameter **Random**. +- **Quantity %**: Set the desired percentage of frames for the Ground truth job. +
**Note** that when you use **Quantity %**, the **Frames** field will be autofilled. +- **Frame count**: Set the desired number of frames for the Ground truth job. +
**Note** that when you use **Frames**, the **Quantity %** field will be autofilled. +- **Seed**: (Optional) If you need to make the random selection reproducible, specify this number. + It can be any integer number, the same value will yield the same random selection (given that the + frame number is unchanged).
**Note** that if you want to use a + custom frame sequence, you can do this using the server API instead, + see [Job API create()](https://docs.cvat.ai/docs/api_sdk/sdk/reference/apis/jobs-api/#create). 4. Click **Submit**. -5. Annotate frames, save your work. -6. Change the status of the job to **Completed**. -7. Change **Stage** to **Accepted**. The **Ground truth** job will appear in the jobs list. -![Add new job](/images/honeypot03.jpg) + ![Ground Truth job](/images/honeypot03.jpg) -## Managing Ground Truth jobs: Import, Export, and Deletion +5. Annotate frames and save your work or upload annotations. +6. Switch the Ground Truth job into the `acceptance` stage and `completed` state -Annotations from **Ground truth** jobs are not included in the dataset export, -they also cannot be imported during task annotations import -or with automatic annotation for the task. + ![Set job status](/images/honeypot10.jpg) +{{% /tab %}} -Import, export, and delete options are available from the -job's menu. +{{< /tabpane >}} -![Add new job](/images/honeypot04.jpg) +> A **Ground truth** job is considered **configured** +> if it is at the **acceptance** stage and in the **completed** state. -### Import +A _configured_ Ground Truth job is required for all quality computations in CVAT. -If you want to import annotations into the **Ground truth** job, do the following. +## Validation modes -1. Open the task, and find the **Ground truth** job in the jobs list. -2. Click on three dots to open the menu. -3. From the menu, select **Import annotations**. -4. Select import format, and select file. -5. Click **OK**. +Currently, there are 2 validation modes available for tasks: **Ground Truth** and **Honeypots**. +These names are often used interchangeably, but in CVAT they have some differences. +Both modes rely on the use of Ground Truth annotations in a task, +stored in a [Ground Truth job](#ground-truth-jobs), where they can be managed. -> **Note** that if there are imported annotations for the frames that exist in the task, -> but are not included in the **Ground truth** job, they will be ignored. -> This way, you don't need to worry about "cleaning up" your **Ground truth** -> annotations for the whole dataset before importing them. -> Importing annotations for the frames that are not known in the task still raises errors. +### Ground Truth -### Export +In this mode some of the task frames are selected into the validation set, represented as a +separate Ground Truth job. The regular annotation jobs in the task are not affected in any way. -To export annotations from the **Ground truth** job, do the following. +Ground Truth jobs can be created at the task creation automatically or +manually at any moment later. They can also be removed manually at any moment. +This validation mode is available for any tasks and annotations. -1. Open the task, and find a job in the jobs list. -2. Click on three dots to open the menu. -3. From the menu, select **Export annotations**. +This is a flexible mode that can be enabled or disabled at any moment without any disruptions +to the annotation process. + +#### Frame selection + +This validation mode can use several frame selection methods. + +##### Random + +This is a simple method that selects frames into the validation set randomly, +representing the [basic approach](#basics), described above. + +Parameters: +- frame count - the number or percent of the task frames to be used for validation. 
+ Can be specified as an absolute number in the `Frame count` field or a percent in the `Quantity` + field. If there are both fields on the page, they are linked, which means changing one of them + will adjust the other one automatically. +- random seed - a number to be used to initialize the random number generator. Can be useful if + you want to create a reproducible sequence of frames. + +##### Random per job + +This method selects frames into the validation set randomly from each annotation job in the task. + +It solves one of the issues with the simple Random method: some of the jobs can get +no validation frames, which makes it impossible to estimate quality in such jobs. Note +that using this method can result in an increased total size of the validation set. + +Parameters: +- frame count per job - the percent of the job frames to be used for validation. + This method uses the task segment size to select the same number of validation frames + in each job, if possible. Can be specified as an absolute number in the `Frame count` + field or a percent in the `Quantity per job` field. If there are both fields on the page, + they are linked, which means changing one of them will adjust the other one automatically. +- random seed - a number to be used to initialize the random number generator. Can be useful if + you want to create a reproducible sequence of frames. + +### Honeypots + +In this mode, some random frames of the task are selected into the validation set. +Then, validation frames are randomly mixed into regular annotation jobs. +This mode can also be called "Ground Truth pool", reflecting the way validation frames are used. +This mode can only be used at task creation and cannot be changed later. + +The mode has some limitations on the compatible tasks: +- It's not possible to use it for an already existing task; the task has to be recreated. +- This mode assumes random frame ordering, so it is only available for image annotation tasks + and not for ordered sequences like videos. +- Tracks are not supported in such tasks. + +The validation set can be managed after the task is created - annotations can be edited, +frames can be excluded and restored, and honeypot frames in the regular jobs can be changed. +However, it's not possible to select new validation frames after the task is created. +The Ground truth job created for this validation mode cannot be deleted. + +Parameters: +- frame count per job (%) - the percent of job frames (segment size) to be **added** into each + annotation job from the validation set. Can be specified in the `Overhead per job` field. +- total frame count (%) - the percent of the task frames to be included into the validation set. + This value must result in at least `frame count per job` * `segment size` frames. Can be specified + in the `Total honeypots` field. + +### Mode summary + +Here is a brief comparison of the validation modes: + +| **Aspect** | **Ground Truth** | **Honeypots** | +| -------------- | -------------------------------------------- | ------------------------------------------- | +| When can be used | any time | at task creation only | +| Frame management options | exclude, restore | exclude, restore, change honeypots in jobs | +| Ground Truth job management options | create, delete | create | +| Task frame requirements | - | random ordering only | +| Annotations | any | tracks are not supported | +| Minimum validation frames count | - `manual` and `random_uniform` - any
 (but some jobs can get no validation frames)
- `random_per_job` - jobs count * GT frames per job | not less than honeypots count per job | +| Task annotation import | GT annotations and regular annotations do not affect each other | Annotations are imported both into the GT job and regular jobs. Annotations for validation frames are copied into corresponding honeypot frames. | +| Task annotation export | GT annotations and regular annotations do not affect each other | Annotations for non-validation frames are exported as is. Annotations for validation frames are taken from the GT frames. Honeypot frames are skipped. | + +### Choosing the right mode + +Here are some examples of how to choose between these options. The general advice is to use +Ground Truth for better flexibility, but keep in mind that it can require more resources for +validation set annotation. Honeypots, on the other hand, can be beneficial if you want to +minimize the number of validation images required, but the downside here is that there are some +limitations on where this mode can be used. + +Example: a video annotation with tracks. In this case, there is only one option, +the Ground Truth mode, so just use it. + +Example: an image dataset annotation where image order is not important. Here you can use both options. +You can choose Ground Truth for better flexibility in validation. This way, you will have +full control of the validation frames in the task, annotation options won't be limited, and the +regular jobs will not be affected in any way. However, if you have a limited budget +for the validation (for instance, you have only a small number of validation frames) or you want +to allow more scalability (with this approach the number of validation frames doesn't depend on +the number of regular annotation jobs), it makes sense to consider using Honeypots instead. + +## Quality management + +If a task has validation configured, there are several options to manage the validation set images. +With any of the validation modes, there will be a special Ground Truth (GT) job in the task. + +### Validation set management + +Validation frames can be managed on the task Quality Management page. Here it's possible to +check the number of validation frames and the current validation mode, and review the frame details. +For each frame you can see the number of uses in the task. When in the Ground Truth mode, this +number will be 1 for all frames. With Honeypots, these numbers can be 0, 1, or more. + +#### Frame changes + +In both validation modes, it's possible to exclude some of the validation frames +from being used for validation. This can be useful if you find that some +of the validation frames are "bad", extra, or if they have incorrect annotations, +which you don't want to fix. Once a frame is marked "excluded", it will not be used +for validation. There is also an option to restore a previously excluded frame if you change your mind. + +There is an option to exclude or restore frames in bulk mode. To use it, select the needed frames +using the checkboxes, and click one of the buttons next to the table header. + +#### Ground Truth job management + +In the Ground Truth validation mode, there will be an option to remove the [Ground Truth job](#ground-truth-jobs) +from the task. This can be useful if you want to change the validation set frames completely, +add more frames, or remove some of the frames for any reason. This is available in the job +Actions menu. + +In the Honeypots mode, it's not possible to add or remove the GT job, so it's not possible to +add more validation frames. 
+ +![Ground truth job actions](/images/honeypot04.jpg) + +### Create + +A Ground Truth job can be [added manually](#configuring-quality-estimation) +in a task without a selected validation mode or in a task with the Ground Truth validation mode, +after the existing Ground Truth job is [deleted manually](#delete). ### Delete -To delete the **Ground truth** job, do the following. +To delete the Ground Truth job, do the following: -1. Open the task, and find the **Ground truth** job in the jobs list. +1. Open the task and find the Ground Truth job in the jobs list. 2. Click on three dots to open the menu. 3. From the menu, select **Delete**. -## Assessing data quality with Ground truth jobs +> Note: The Ground truth job in the "Honeypots" task validation mode cannot be deleted. -Once you've established the **Ground truth** job, proceed to annotate the dataset. +### Import annotations -CVAT will begin the quality comparison between the annotated task and the -**Ground truth** job in this task once it is finished (on the `acceptance` stage and in the `completed` state). +If you want to import annotations into the Ground truth job, do the following: -> **Note** that the process of quality calculation may take up to several hours, depending on -> the amount of data and labeled objects, and is **not updated immediately** after task updates. +1. Open the task and find the Ground truth job in the jobs list. +2. Click on three dots to open the menu. +3. From the menu, select **Import annotations**. +4. Select import format and select file. +5. Click **OK**. -To view results go to the **Task** > **Actions** > **View analytics**> **Performance** tab. +> **Note** that if there are imported annotations for the frames that exist in the task, +> but are not included in the **Ground truth** job, they will be ignored. +> This way, you don't need to worry about "cleaning up" your Ground truth +> annotations for the whole dataset before importing them. +> Importing annotations for the frames that are not known in the task still raises errors. -![Add new job](/images/honeypot05.jpg) +### Export annotations -### Quality data +To export annotations from the Ground Truth job, do the following: + +1. Open the task and find a job in the jobs list. +2. Click on three dots to open the menu. +3. From the menu, select **Export annotations**. -The Analytics page has the following fields: +### Annotation management - +Annotations for validation frames can be displayed and edited in a special +[Ground Truth job](#ground-truth-jobs) in the task. You can edit the annotations manually, +use auto-annotation features, import and export annotations in this job. -| Field | Description | -| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Mean annotation quality | Displays the average quality of annotations, which includes: the count of accurate annotations, total task annotations, ground truth annotations, accuracy rate, precision rate, and recall rate. | -| GT Conflicts | Conflicts identified during quality assessment, including extra or missing annotations. Mouse over the **?** icon for a detailed conflict report on your dataset. | -| Issues | Number of {{< ilink "/docs/manual/advanced/analytics-and-monitoring/manual-qa" "opened issues" >}}. If no issues were reported, will show 0. | -| Quality report | Quality report in JSON format. 
| -| Ground truth job data | "Information about ground truth job, including date, time, and number of issues. | -| List of jobs | List of all the jobs in the task | +In the Ground Truth task validation mode, annotations of the Ground Truth job do not affect +other jobs in any way. The Ground Truth job is just a separate job, which can only be +changed directly. Annotations from **Ground truth** jobs are not included in the dataset +export, they also cannot be imported during task annotations import +or with automatic annotation for the task. - +In the Honeypots task validation mode, the annotations of the GT job also do not affect other +jobs in any way. However, import and export of **task** annotations works differently. +When importing **task** annotations, annotations for validation frames will be copied +both into GT job frames and into corresponding honeypot frames in annotation jobs. +When exporting **task** annotations, honeypot frames in annotation jobs will be ignored, +and validation frames in the resulting dataset will get annotations from the GT job. + +> Note that this means that exporting from a task with honeypots and importing the results back +> will result in changed annotations on the honeypot frames. If you want to back up annotations, +> use a task backup or export job annotations instead. + +Import and export of Ground Truth **job** annotations works the same way in both modes. + +Ground Truth jobs are included in task backups, so they can be saved and restored this way. + +Import, Export, and Delete options are available from the Ground Truth job Actions menu. +[Read more](#ground-truth-job-management). ### Annotation quality settings @@ -181,48 +365,227 @@ three dots. The following window will open. Hover over the **?** marks to understand what each field represents. -![Add new job](/images/honeypot08.jpg) +![Quality settings page](/images/honeypot08.jpg) Annotation quality settings have the following parameters: -| Field | Description | -| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Min overlap threshold | Min overlap threshold(IoU) is used for the distinction between matched / unmatched shapes. | -| Low overlap threshold | Low overlap threshold is used for the distinction between strong/weak (low overlap) matches. | -| OKS Sigma | IoU threshold for points. The percent of the box area, used as the radius of the circle around the GT point, where the checked point is expected to be. | -| Relative thickness (frame side %) | Thickness of polylines, relative to the (image area) ^ 0.5. The distance to the boundary around the GT line inside of which the checked line points should be. | -| Check orientation | Indicates that polylines have direction. | -| Min similarity gain (%) | The minimal gain in the GT IoU between the given and reversed line directions to consider the line inverted. Only useful with the Check orientation parameter. | -| Compare groups | Enables or disables annotation group checks. | -| Min group match threshold | Minimal IoU for groups to be considered matching, used when the Compare groups are enabled. | -| Check object visibility | Check for partially-covered annotations. Masks and polygons will be compared to each other. | -| Min visibility threshold | Minimal visible area percent of the spatial annotations (polygons, masks). 
For reporting covered annotations, useful with the Check object visibility option. | -| Match only visible parts | Use only the visible part of the masks and polygons in comparisons. | +| **Parameter** | **Description** | +| - | - | +| _General reporting_ | | +| Target metric | The primary metric used for quality estimation. It affects which metric is displayed in the UI and used for overall quality estimation. | + +| _Immediate feedback_ | | +| - | - | +| Max validations per job | Configures maximum job validations per assignment for the {{< ilink "/docs/enterprise/immediate-feedback" "Immediate feedback" >}} feature. | +| Target metric threshold | Defines the minimal quality requirements in terms of the selected target metric. Serves as an acceptance threshold for the {{< ilink "/docs/enterprise/immediate-feedback" "Immediate feedback" >}} feature. | + +| _Shape matching_ | | +| - | - | +| Min overlap threshold | Min overlap threshold used for the distinction between matched and unmatched shapes. Used to match all types of annotations. It corresponds to the Intersection over union (IoU) for spatial annotations, such as bounding boxes and masks. | +| Low overlap threshold | Low overlap threshold used for the distinction between strong and weak matches. Only affects _Low overlap_ warnings. It is assumed that _Min overlap threshold_ <= _Low overlap threshold_. | +| Match empty frames | Consider frames matched if there are no annotations on both the GT and regular job frames. | + +| _Point and Skeleton matching_ | | +| - | - | +| OKS Sigma | Relative size of points. The percent of the bbox side, used as the radius of the circle around the GT point, where the checked point is expected to be. For boxes with different width and height, the "side" is computed as a geometric mean of the width and height. | + +| _Point matching_ | | +| - | - | +| Point size base | When comparing point annotations (including both separate points and point groups), the OKS sigma parameter defines a matching area for each GT point based on the object size. The point size base parameter allows configuring how to determine the object size. If set to _image_size_, the image size is used. Useful if each point annotation represents a separate object or boxes grouped with points do not represent object boundaries. If set to _group_bbox_size_, the object size is based on the point group bounding box size. Useful if each point group represents an object or there is a bbox grouped with points, representing the object size. | + +| _Polyline matching_ | | +| - | - | +| Relative thickness | Thickness of polylines, relative to the (image area) ^ 0.5. The distance to the boundary around the GT line inside of which the checked line points should be. | +| Check orientation | Indicates that polylines have direction. Used to produce _Mismatching direction_ warnings. | +| Min similarity gain (%) | The minimal gain in IoU between the given and reversed line directions to consider the line inverted. Only useful with the _Check orientation_ parameter. | + +| _Group matching_ | | +| - | - | +| Compare groups | Enables or disables annotation group checks. This check will produce _Group mismatch_ warnings for grouped annotations if the annotation groups do not match with the specified threshold. Each annotation within a group is expected to match with a corresponding annotation in a GT group. | +| Min group match threshold | Minimal IoU for groups to be considered matching, used when _Compare groups_ is enabled. 
| + +| _Mask and polygon matching_ | | +| - | - | +| Check object visibility | Check for partially-covered annotations. Masks and polygons will be compared to each other. | +| Min visibility threshold | Minimal visible area percent of the mask annotations (polygons, masks). Used for reporting _Covered annotation_ warnings, useful with the _Check object visibility_ option. | +| Match only visible parts | Use only the visible part of the masks and polygons in comparisons. | -### GT conflicts in the CVAT interface +## Comparisons + +### Tags + +Simple equality is used for matching. + +### Shapes + +A pair of shapes is considered matching if both their spatial parameters and labels match. +For each shape, spatial parameters are matched first, then labels are matched. + +Each shape type can have its own spatial matching details. Specifically: +- bounding box - [IoU](https://en.wikipedia.org/wiki/Jaccard_index) (including rotation). + For example, for a pair of bounding boxes it can be visualized this way: + + ![Bbox IoU](/images/quality_comparison_bbox1.svg) + +
`IoU = intersection area / union area`.
+ The green part is the intersection, and the green, yellow, and red parts together are the union. + +- polygons, masks - IoU. Polygons and masks are considered interchangeable, + which means a mask can be matched with a polygon and vice versa. Polygons and masks in groups + are merged into a single object first. + If the [_Match only visible parts_](#annotation-quality-settings) option is enabled, + objects will be cut to the visible (non-covered) parts only, which is determined by the + shape z order. +- skeletons - The OKS metric [from the COCO](https://cocodataset.org/#keypoints-eval) + dataset is used. Briefly, each skeleton point gets a circular area around it, + determined by the _object size_ (bounding box side) and _relative point size_ (_sigma_) values, + where this point can be matched with the specified probability. If a bounding box is grouped + with the skeleton, it is used for object size computation, otherwise a bounding box of + the visible points of the skeleton is used. + + For example, consider a skeleton with 6 points and a square bounding box attached: + + ![Skeleton OKS](/images/quality_comparison_skeleton1.svg) + + In this example, the _Sigma_ parameter is `0.05` (5%) of the bbox side. + Areas shown in the green color cover ~68.2% (1 sigma) of the points, + corresponding to each GT point. A point on the boundary of such an area will have an ~88% + probability of being correct. The blue-colored zone contains ~95% (2 sigma) of the correct points + for the corresponding GT point. A point on the boundary of such an area will have a ~60% + probability of being correct. These probabilities are then averaged over the visible points of the + skeleton, and the resulting values are compared against the _Min similarity threshold_ + to determine whether the skeletons are matching. _Sigma_ corresponds to the sigma of the + [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution). + +- points - The OKS metric is used for each point group annotation. Same as for skeletons, + _OKS Sigma_ determines relative point sizes. The _Point size base_ setting allows + configuring whether points in point groups should use the group bounding box or the image space. + Using image space for object size can be useful if you want to treat each point + as a separate annotation. +- polylines - A pair of lines is considered matching if all the points of one line lie within + a "hull" of the other one. The "hull" is determined as the area around the polyline, as + if the line had some "thickness". For example, the black polyline can have a hull shown in + the green color: + + ![Polyline thickness and hull](/images/quality_comparison_polylines1.png) + + The line thickness can be configured via the _Relative thickness_ setting. + The value is relative to the image side and determines half of the hull width. +- ellipses - IoU, described in more detail above. + +> **Note**: 2d cuboids are not supported. + +### Tracks + +Tracks are split into separate shapes and compared on a per-frame basis with other tracks +and shapes. + +## Quality Analytics + +> **Note**: quality analytics is a paid feature. Please check how to get access to this +> functionality in the {{< ilink "/docs/enterprise" "Paid features" >}} section of the site. + +Once the quality estimation is [enabled in a task](#configuring-quality-estimation) +and the Ground Truth job is configured, quality analytics becomes available +for the task and its jobs. 
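If you prefer to consume quality metrics in scripts rather than in the UI, the reports described below can also be retrieved over the server's REST API. The following is a minimal sketch using `requests`; the host, credentials, and task ID are placeholders, and the endpoint paths and filter names should be verified against your server's API schema, as they may vary between CVAT versions:

```python
import requests

HOST = "http://localhost:8080"  # placeholder CVAT instance URL
AUTH = ("user", "password")     # placeholder credentials (basic auth)

# List the quality reports computed for a task.
response = requests.get(
    f"{HOST}/api/quality/reports",
    params={"task_id": 42, "target": "task"},
    auth=AUTH,
)
response.raise_for_status()
reports = response.json()["results"]

if reports:
    # Download the full JSON payload of one report - the same document
    # that the UI offers as a downloadable quality report.
    data = requests.get(
        f"{HOST}/api/quality/reports/{reports[0]['id']}/data",
        auth=AUTH,
    ).json()
    print(sorted(data))  # inspect the available report sections
```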
+ +When you open the Quality Analytics page, it displays quality metrics from the most recent quality estimation. +If it's your first time accessing the page, no quality report will be available yet. +The date of the last computation is shown next to the report download button. + +If you want to request an update of the quality metrics in a task (e.g. after the settings were changed), +you can do this by pressing the **Refresh** button on the +task **Quality Management** > **Analytics** page. + +> **Note** that the process of quality calculation may take up to several hours, depending on +> the amount of data and labeled objects, and is **not updated immediately** after task updates. + +![Quality Analytics page - refresh button](/images/honeypot11.jpg) + +Once quality metrics are computed, they are available for detailed review on this page. +Conflicts can be reviewed in the [Review mode of jobs](#reviewing-gt-conflicts). +A job must have at least 1 validation frame (shown in the **Frame intersection** column) to +be included in quality computation. + +### Analytics page contents + +The Analytics page has the following elements: + +![Quality Analytics page](/images/honeypot05.jpg) + + + +| Field | Description | +| - | - | +| Mean annotation quality | Displays the average quality of annotations, which includes: counts of the accurate annotations, total task annotations, ground truth annotations, accuracy, precision, and recall. The currently selected _Target metric_ is displayed as the primary score. | +| GT Conflicts | Conflicts identified during quality assessment, including extra or missing annotations. Mouse over the **?** icon for a detailed conflict report on your dataset. | +| Issues | Number of {{< ilink "/docs/manual/advanced/analytics-and-monitoring/manual-qa" "opened issues" >}}. If no issues were reported, 0 will be shown. | +| Quality report | Quality report in JSON format. | +| Ground truth job data | Information about the ground truth job, including date, time, and number of issues. | +| List of jobs | List of all the jobs in the task | + + + +![Jobs list](/images/honeypot12.jpg) + +### Problem Reporting + +CVAT reports two types of problems: errors and warnings. Errors affect the resulting quality +scores and highlight significant problems in annotations. Warnings do not affect the resulting +quality metrics, but they can still highlight significant problems, depending on the project +requirements. + +| **Problem** | **Type** | **Description** | +| - | - | - | +| Missing annotation | error | No matching annotation found in the regular job annotations. [Configured](#annotation-quality-settings) by _Min overlap threshold_ and shape type-specific parameters. | +| Extra annotation | error | No matching annotation found in the GT job annotations. [Configured](#annotation-quality-settings) by _Min overlap threshold_ and shape type-specific parameters. | +| Mismatching label | error | A GT annotation and a regular job annotation match, but their labels are different. | +| Low overlap | warning | A GT annotation and a regular job annotation match, but the similarity is low. [Configured](#annotation-quality-settings) by _Low overlap threshold_. | +| Mismatching direction | warning | A GT line and a regular line match, but they have different directions. [Configured](#annotation-quality-settings) by _Compare orientation_. | +| Mismatching attributes | warning | A GT annotation and a regular annotation match, but their attributes are different. [Configured](#annotation-quality-settings) by _Compare attributes_. 
| +| Mismatching groups | warning | A GT annotation group and a regular annotation group do not match. [Configured](#annotation-quality-settings) by _Compare groups_. | +| Covered annotation | warning | The visible part of a regular mask or polygon annotation is too small. The visibility is determined by arranging mask and polygon shapes on the frame in the specified _z order_. [Configured](#annotation-quality-settings) by _Check object visibility_. | + +### Quality Reports + +For each job included in the quality computation, there is a quality report download button on +the [Analytics page](#analytics-page-contents). There is also a button to download the aggregated +task quality report. These buttons provide an option to download a Quality Report for a task or job +in JSON format. Such reports can be useful if you want to process the quality data reported by CVAT +automatically in your scripts, etc. + +![Download report](/images/quality_download_report.png) + +Quality Reports contain quality metrics and conflicts, and include all the information +available on the quality analytics page. You can find additional quality metrics in these reports, +such as _mean_iou_ for shapes, confusion matrices, per-label and per-frame quality estimations. + +Additional information on how to compute and use various metrics for dataset +quality estimation is available [here](https://en.wikipedia.org/wiki/Confusion_matrix). + +### Reviewing GT conflicts To see GT Conflicts in the CVAT interface, go to **Review** > **Issues** > **Show ground truth annotations and conflicts**. -![GT conflict](/images/honeypot06.gif) +![GT conflicts review - enable](/images/honeypot06.gif) -The ground truth (GT) annotation is depicted as -a dotted-line box with an associated label. +Ground Truth annotations are displayed with a dotted-line border. +The associated label and the `(Ground Truth)` marker are shown on hover. Upon hovering over an issue on the right-side panel with your mouse, -the corresponding GT Annotation gets highlighted. +the corresponding annotations are highlighted. Use arrows in the Issue toolbar to move between GT conflicts. To create an issue related to the conflict, -right-click on the bounding box and from the +To create an issue related to the conflict, right-click on the bounding box and from the menu select the type of issue you want to create. -![GT conflict](/images/honeypot07.jpg) +![GT conflicts review - create issue](/images/honeypot07.jpg) ## Annotation quality & Honeypot video tutorial diff --git a/site/content/en/docs/manual/advanced/annotation-with-brush-tool.md b/site/content/en/docs/manual/advanced/annotation-with-brush-tool.md index 8f672c1b892a..bc135b40273d 100644 --- a/site/content/en/docs/manual/advanced/annotation-with-brush-tool.md +++ b/site/content/en/docs/manual/advanced/annotation-with-brush-tool.md @@ -42,6 +42,7 @@ It has the following elements: | ![Brush size](/images/brushing_tools_brush_size.png) | **Brush size** in pixels.
**Note:** Visible only when **Brush** or **Eraser** are selected. | | ![Brush shape](/images/brushing_tools_brush_shape.png) | **Brush shape** with two options: circle and square.
**Note:** Visible only when **Brush** or **Eraser** are selected. | | ![Pixel remove](/images/brushing_tools_pixels.png) | **Remove underlying pixels**. When you are drawing or editing a mask with this tool,
pixels on other masks that are located at the same positions as the pixels of the
current mask are deleted. | +| ![Hide mask](/images/brushing_tools_hide.png) | **Hide mask**. When drawing or editing a mask, you can enable this feature to temporarily hide the mask, allowing you to see the objects underneath more clearly. | | ![Label](/images/brushing_tools_label_drop.png) | **Label** that will be assigned to the newly created mask | | | ![Move](/images/brushing_tools_brush_move.png) | **Move**. Click and hold to move the menu bar to the other place on the screen | diff --git a/site/content/en/docs/manual/advanced/annotation-with-polygons/creating-mask.md b/site/content/en/docs/manual/advanced/annotation-with-polygons/creating-mask.md index 047c10605a05..5920c1c76ec8 100644 --- a/site/content/en/docs/manual/advanced/annotation-with-polygons/creating-mask.md +++ b/site/content/en/docs/manual/advanced/annotation-with-polygons/creating-mask.md @@ -7,7 +7,7 @@ weight: 6 ### Cutting holes in polygons Currently, CVAT does not support cutting transparent holes in polygons. However, -it is poissble to generate holes in exported instance and class masks. +it is possible to generate holes in exported instance and class masks. To do this, one needs to define a background class in the task and draw holes with it as additional shapes above the shapes needed to have holes: diff --git a/site/content/en/docs/manual/advanced/contextual-images.md b/site/content/en/docs/manual/advanced/contextual-images.md index 4dcdf6d178ce..fb5eda39b52f 100644 --- a/site/content/en/docs/manual/advanced/contextual-images.md +++ b/site/content/en/docs/manual/advanced/contextual-images.md @@ -148,7 +148,7 @@ Each context image has the following elements: | 1 | **Full screen**. Click to expand the contextual image in to the full screen mode.

Click again to revert contextual image to windowed mode. | | 2 | **Move contextual image**. Hold and move contextual image to the other place on the screen.

![contex_images_3](/images/context_img_03.gif) | | 3 | **Name**. Unique contextual image name | -| 4 | **Select contextual image**. Click to open a horisontal listview of all available contextual images.

Click on one to select. | +| 4 | **Select contextual image**. Click to open a horizontal listview of all available contextual images.

Click on one to select. | | 5 | **Close**. Click to remove image from contextual images menu. | | 6 | **Extend** Hold and pull to extend the image. | diff --git a/site/content/en/docs/manual/advanced/formats/format-coco.md b/site/content/en/docs/manual/advanced/formats/format-coco.md index 4b467dff3f11..72193d440be5 100644 --- a/site/content/en/docs/manual/advanced/formats/format-coco.md +++ b/site/content/en/docs/manual/advanced/formats/format-coco.md @@ -57,7 +57,7 @@ such as `instances`, `panoptic`, `image_info`, `labels`, `captions`, or `stuff`. ## COCO import -Uplod format: a single unpacked `*.json` or a zip archive with the structure described above or +Upload format: a single unpacked `*.json` or a zip archive with the structure described above or [here](https://openvinotoolkit.github.io/datumaro/latest/docs/data-formats/formats/coco.html#import-coco-dataset) (without images). diff --git a/site/content/en/docs/manual/advanced/serverless-tutorial.md b/site/content/en/docs/manual/advanced/serverless-tutorial.md index 7355d22d982e..5211886208e8 100644 --- a/site/content/en/docs/manual/advanced/serverless-tutorial.md +++ b/site/content/en/docs/manual/advanced/serverless-tutorial.md @@ -974,9 +974,9 @@ you can use the Ubuntu subsystem, for this do the following: [detectron2-tutorial]: https://detectron2.readthedocs.io/en/latest/tutorials/getting_started.html [retinanet-model-zoo]: https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md#retinanet [faster-rcnn-function]: https://raw.githubusercontent.com/cvat-ai/cvat/38b774046d41d604ed85a521587e4bacce61b69c/serverless/tensorflow/faster_rcnn_inception_v2_coco/nuclio/function.yaml -[nuclio-doc]: https://nuclio.io/docs/latest/reference/function-configuration/function-configuration-reference/ -[nuclio-http-trigger-doc]: https://nuclio.io/docs/latest/reference/triggers/http/ -[nuclio-bkms-doc]: https://nuclio.io/docs/latest/concepts/best-practices-and-common-pitfalls/ +[nuclio-doc]: https://nuclio.io/docs/latest/reference/function-configuration/function-configuration-reference.html +[nuclio-http-trigger-doc]: https://nuclio.io/docs/latest/reference/triggers/http.html +[nuclio-bkms-doc]: https://nuclio.io/docs/latest/concepts/best-practices-and-common-pitfalls.html [retinanet-function-yaml]: https://github.com/cvat-ai/cvat/blob/b2f616859ca64687c385e636b4a25014fbb9d17c/serverless/pytorch/facebookresearch/detectron2/retinanet/nuclio/function.yaml [retinanet-main-py]: https://github.com/cvat-ai/cvat/blob/b2f616859ca64687c385e636b4a25014fbb9d17c/serverless/pytorch/facebookresearch/detectron2/retinanet/nuclio/main.py [nuclio-homepage]: https://nuclio.io/ diff --git a/site/content/en/docs/manual/advanced/single-shape.md b/site/content/en/docs/manual/advanced/single-shape.md index e6d02c994976..78fea012f8a8 100644 --- a/site/content/en/docs/manual/advanced/single-shape.md +++ b/site/content/en/docs/manual/advanced/single-shape.md @@ -50,7 +50,7 @@ The **Single Shape** annotation mode has the following fields: | **Skip Button** | Enables moving to the next frame without annotating the current one, particularly useful when the frame does not have anything to be annotated. | | **List of Hints** | Offers guidance on using the interface effectively, including:
- Click **Skip** for frames without required annotations.
- Hold the **Alt** button to avoid unintentional drawing (e.g. when you want only move the image).
- Use the **Ctrl+Z** combination to undo the last action if needed.
- Use the **Esc** button to completely reset the current drawing progress. | | **Label selector** | Allows for the selection of different labels (`cat`, or `dog` in our example) for annotation within the interface. | -| **Label type selector** | A drop-down list to select type of the label (rectangle, ellipce, etc). Only visible when the type of the shape is **Any**. | +| **Label type selector** | A drop-down list to select type of the label (rectangle, ellipse, etc). Only visible when the type of the shape is **Any**. | | **Options to Enable or Disable** | Provides configurable options to streamline the annotation process, such as:
- **Automatically go to the next frame**.
- **Automatically save when finish**.
- **Navigate only empty frames**.
- **Predefined number of points** - Specific to polyshape annotations, enabling this option auto-completes a shape once a predefined number of points is reached. Otherwise, pressing **N** is required to finalize the shape. | | **Number of Points** | Applicable for polyshape annotations, indicating the number of points to use for image annotation. | diff --git a/site/content/en/docs/manual/basics/CVAT-annotation-Interface/controls-sidebar.md b/site/content/en/docs/manual/basics/CVAT-annotation-Interface/controls-sidebar.md index 07a7d957b47f..775ab3c5dbaa 100644 --- a/site/content/en/docs/manual/basics/CVAT-annotation-Interface/controls-sidebar.md +++ b/site/content/en/docs/manual/basics/CVAT-annotation-Interface/controls-sidebar.md @@ -10,7 +10,7 @@ description: 'Offers tools for navigating within the image, annotation tools, an **Navigation block** - contains tools for moving and rotating images. |Icon |Description | |-- |-- | -|![](/images/image148.jpg)|`Cursor` (`Esc`)- a basic annotation pedacting tool. | +|![](/images/image148.jpg)|`Cursor` (`Esc`)- a basic annotation editing tool. | |![](/images/image149.jpg)|`Move the image`- a tool for moving around the image without
the possibility of editing.| |![](/images/image102.jpg)|`Rotate`- two buttons to rotate the current frame
a clockwise (`Ctrl+R`) and anticlockwise (`Ctrl+Shift+R`).
You can enable `Rotate all images` in the settings to rotate all the images in the job| diff --git a/site/content/en/docs/manual/basics/CVAT-annotation-Interface/navbar.md b/site/content/en/docs/manual/basics/CVAT-annotation-Interface/navbar.md index 74a6af04120b..034f6b9aad3f 100644 --- a/site/content/en/docs/manual/basics/CVAT-annotation-Interface/navbar.md +++ b/site/content/en/docs/manual/basics/CVAT-annotation-Interface/navbar.md @@ -82,6 +82,6 @@ toggle between different annotation and QA modes. | **Fullscreen**

![Fullscreen](/images/image143.jpg) | The fullscreen player mode. The keyboard shortcut is **F11**.                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                     | | **Info**

![Info](/images/image143_2.png)     | Open the job info.

![](/images/image144_detrac.png)

Overview:

  • **Assignee** - the individual to whom the job is assigned.
  • **Reviewer**– the user tasked with conducting the review. For more information, see [**Manual QA**](/docs/manual/advanced/analytics-and-monitoring/manual-qa")
  • **Start frame** - the number of the first frame in this job.
  • **Stop frame** - the number of the last frame in this job.
  • **Frames** - the total number of frames in the job.

**Annotations Statistics** table displays the number of created shapes, categorized by labels (e.g., vehicle, person) and the type of annotation (shape, track), as well as the count of manual and interpolated frames. | | **Filters**

![](/images/image143_3.png)   | Switches on [**Filters**](/docs/manual/advanced/filter/).                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                         | -| **Workplace Switcher** | The drop-down list to swithc between different annotation modes:

![](/images/ui-swithcer.png)

Overview:
  • **Standart** -- default mode.
  • **Attribute** -- annotation with [**Attributes**](docs/manual/advanced/attribute-annotation-mode-advanced/)
  • **Single Shape** -- [**Single shape**](/docs/manual/advanced/single-shape/) annotation mode.
  • **Tag annotation**- annotation with [Tags](/docs/manual/advanced/annotation-with-tags/)
  • **Review** -- [**Manual QA**](/manual/advanced/analytics-and-monitoring/manual-qa/) mode.                                                                                                                                                                               | +| **Workplace Switcher** | The drop-down list to switch between different annotation modes:

    ![](/images/ui-swithcer.png)

    Overview:
    • **Standard** -- default mode.
    • **Attribute** -- annotation with [**Attributes**](/docs/manual/advanced/attribute-annotation-mode-advanced/)
    • **Single Shape** -- [**Single shape**](/docs/manual/advanced/single-shape/) annotation mode.
    • **Tag annotation** -- annotation with [Tags](/docs/manual/advanced/annotation-with-tags/)
    • **Review** -- [**Manual QA**](/docs/manual/advanced/analytics-and-monitoring/manual-qa/) mode.                                                                                                                                                                               | diff --git a/site/content/en/docs/manual/basics/attach-cloud-storage.md b/site/content/en/docs/manual/basics/attach-cloud-storage.md index 4bc3e14e8c16..075b9a2d1a71 100644 --- a/site/content/en/docs/manual/basics/attach-cloud-storage.md +++ b/site/content/en/docs/manual/basics/attach-cloud-storage.md @@ -159,7 +159,7 @@ aws s3 cp --recursive ``` 4. After copying the files, you can create a manifest file as described in - {{< ilink "/docs/manual/advanced/dataset_manifest" "preapair manifest file section" >}}: + {{< ilink "/docs/manual/advanced/dataset_manifest" "prepare manifest file section" >}}: ```bash python /utils/dataset_manifest/create.py --output-dir @@ -329,7 +329,7 @@ To create bucket, do the following: - **Storage account name**: to access container from CVAT. - Select a region closest to you. - - Select **Performance** > **Standart**. + - Select **Performance** > **Standard**. - Select **Local-redundancy storage (LRS)**. - Click **next: Advanced>**. @@ -387,7 +387,7 @@ Use the SAS token or connection string to grant secure access to the container. To configure the credentials: -1. Go to **Home** > **Resourse groups** > You resource name > Your storage account. +1. Go to **Home** > **Resource groups** > Your resource name > Your storage account. 2. On the left menu, click **Shared access signature**. 3. Change the following fields: - **Allowed services**: Enable **Blob** . Disable all other fields. diff --git a/site/content/en/docs/manual/basics/create_an_annotation_task.md b/site/content/en/docs/manual/basics/create_an_annotation_task.md index b9501a110765..1e4c84a941f3 100644 --- a/site/content/en/docs/manual/basics/create_an_annotation_task.md +++ b/site/content/en/docs/manual/basics/create_an_annotation_task.md @@ -260,7 +260,7 @@ The following parameters are available: | Use zip/video chunks | Use this parameter to divide your video or image dataset for annotation into short video clips a zip file of frames.
      Zip files are larger but do not require decoding on the client side, and video clips are smaller but require decoding.
      It is recommended to turn off this parameter for video tasks to reduce traffic between the client side and the server. | | Use cache | Select checkbox, to enable _on-the-fly_ data processing to reduce task creation time and store data in a cache with a policy of
      evicting less popular items.

      For more information, see {{< ilink "/docs/manual/advanced/data_on_fly" "Data preparation on the fly" >}}. | | Image Quality | CVAT has two types of data: original quality and compressed. Original quality images are used for dataset export
      and automatic annotation. Compressed images are used only for annotations to reduce traffic between the server
      and client side.
      It is recommended to adjust the compression level only if the images contain small objects that are not
      visible in the original quality.
      Values range from `5` (highly compressed images) to `100` (not compressed). | -| Overlap Size | Use this parameter to create overlapped segments, making tracking continuous from one segment to another.

      **Note** that this functionality only works for bounding boxes.

      This parameter has the following options:

      **Interpolation task** (video sequence). If you annotate with a bounding box on two adjacent segments, they will be
      merged into a single bounding box. In case the overlap is zero or the bounding box is inaccurate (not enclosing the object
      properly, misaligned or distorted) on the adjacent segments, it may be difficult to accurately interpole the object's
      movement between the segments. As a result, multiple tracks will be created for the same object.

      **Annotation task** (independent images). If an object exists on overlapped segments with overlap greater than zero,
      and the annotation of these segments is done properly, then the segments will be automatically merged into a single
      object. If the overlap is zero or the annotation is inaccurate (not enclosing the object properly, misaligned, distorted) on the
      adjacent segments, it may be difficult to accurately track the object. As a result, multiple bounding boxes will be
      created for the same object.

      If the annotations on different segments (on overlapped frames) are very different, you will have two shapes
      for the same object.

      To avoid this, accurately annotate the object on the first segment and the same object on the second segment to create a track
      between two annotations. | +| Overlap Size | Use this parameter to create overlapped segments, making tracking continuous from one segment to another.

      **Note** that this functionality only works for bounding boxes.

      This parameter has the following options:

      **Interpolation task** (video sequence). If you annotate with a bounding box on two adjacent segments, they will be
      merged into a single bounding box. In case the overlap is zero or the bounding box is inaccurate (not enclosing the object
      properly, misaligned or distorted) on the adjacent segments, it may be difficult to accurately interpolate the object's
      movement between the segments. As a result, multiple tracks will be created for the same object.

      **Annotation task** (independent images). If an object exists on overlapped segments with overlap greater than zero,
      and the annotation of these segments is done properly, then the segments will be automatically merged into a single
      object. If the overlap is zero or the annotation is inaccurate (not enclosing the object properly, misaligned, distorted) on the
      adjacent segments, it may be difficult to accurately track the object. As a result, multiple bounding boxes will be
      created for the same object.

      If the annotations on different segments (on overlapped frames) are very different, you will have two shapes
      for the same object.

      To avoid this, accurately annotate the object on the first segment and the same object on the second segment to create a track
      between two annotations. | | Segment size | Use this parameter to divide a dataset into smaller parts. For example, if you want to share a dataset among multiple
      annotators, you can split it into smaller sections and assign each section to a separate job.
      This allows annotators to work on the same dataset concurrently. | | Start frame | Defines the first frame of the video. | | Stop frame | Defines the last frame of the video. | diff --git a/site/content/en/docs/manual/basics/quality-control.md b/site/content/en/docs/manual/basics/quality-control.md new file mode 100644 index 000000000000..af9d8a5837cf --- /dev/null +++ b/site/content/en/docs/manual/basics/quality-control.md @@ -0,0 +1,109 @@ +--- +title: 'Quality control' +linkTitle: 'Quality control' +weight: 21 +description: 'Overview of quality control features' +--- + +CVAT has the following features for automated quality control of annotations: +- [Validation set configuration for a task](#how-to-enable-quality-control) +- Job validation on job finish ("{{< ilink "/docs/enterprise/immediate-feedback" "Immediate feedback" >}}") +- [Review mode for problems found](#how-to-review-problems-found) +- [Quality analytics](#how-to-check-task-quality-metrics) + +In this section we only highlight the key steps in quality estimation. +Read the detailed guide on quality estimation in CVAT in the +{{< ilink "/docs/manual/advanced/analytics-and-monitoring/auto-qa" "Advanced section" >}}. + +## How to enable quality control + +{{< tabpane text=true >}} + +{{%tab header="In a new task" %}} + +1. Go to task creation +2. Select the source media, configure other task parameters +3. Scroll down to the **Quality Control** section +4. Select one of the +{{< ilink "/docs/manual/advanced/analytics-and-monitoring/auto-qa#validation-modes" "validation modes" >}} available + + ![Create task with validation mode](/images/honeypot09.jpg) + +5. Create the task +6. Upload or create Ground Truth annotations in the Ground Truth job in the task +7. Switch the Ground Truth job into the `acceptance` stage and `completed` state + + ![Set job status](/images/honeypot10.jpg) + +{{% /tab %}} + +{{%tab header="In an existing task" %}} + +> For already existing tasks, only the Ground Truth validation mode is available. If you want +> to use Honeypots for your task, you will need to recreate the task. + +1. Open the task page +2. Click the `+` button next to the job list + + ![Create job](/images/honeypot01.jpg) + +3. Select Job Type **Ground truth** and configure the job parameters + + ![Configure job parameters](/images/honeypot02.jpg) + +4. Upload or create Ground Truth annotations in the Ground Truth job in the task +5. Switch the Ground Truth job into the `acceptance` stage and `completed` state + + ![Set job status](/images/honeypot10.jpg) + +{{% /tab %}} + +{{< /tabpane >}} + +## How to enable immediate job feedback + +> **Note**: This feature requires a configured validation set in the task. Read more +> in [How to enable quality control](#how-to-enable-quality-control) and in the +> {{< ilink "/docs/manual/advanced/analytics-and-monitoring/auto-qa#configuring-quality-estimation" "full guide" >}}. + +1. Open the task **Actions** menu > **Quality control** > **Settings** +2. Set **Max validations per job** to a value above zero. 3 is a good starting number. + + ![Configure job validations](/images/immediate-feedback-quality-settings.png) + +3. Save the updated settings +4. Assign an annotator to an annotation job +5. Annotate the job +6. Mark the job finished using the corresponding button in the menu +7. Once the job is completed, you'll see the job validation dialog + + + +Each assignee gets no more than the specified number of validation attempts. 
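Picking up the **Overlap Size** and **Segment size** rows from the create_an_annotation_task.md table above, a small worked example makes the merging behavior easier to see. The sketch below is plain Python arithmetic with no CVAT API calls; the splitting rule it uses (each segment starts `segment_size - overlap` frames after the previous one) is one plausible reading of the table's description, not a statement about the server's actual implementation.

```python
def segment_ranges(total_frames: int, segment_size: int, overlap: int):
    """Yield inclusive (start, stop) frame ranges for each segment.

    Assumes each new segment starts `segment_size - overlap` frames after
    the previous one -- a plausible reading of the "Overlap Size" row
    above, not a guaranteed match for CVAT's internal splitting logic.
    """
    step = segment_size - overlap
    start = 0
    while start < total_frames:
        yield (start, min(start + segment_size, total_frames) - 1)
        if start + segment_size >= total_frames:
            break
        start += step

# A 100-frame video with 40-frame segments and an overlap of 5:
# frames 35..39 belong to both segment 0 and segment 1, so a box
# annotated there on both segments can be merged into one track.
print(list(segment_ranges(100, 40, 5)))
# [(0, 39), (35, 74), (70, 99)]
```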
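To complement the immediate-feedback steps just described, here is a minimal sketch of changing the same setting over the REST API instead of the UI. The `/api/quality/settings` endpoint, the `task_id` filter, and the `max_validations_per_job` field are inferred from this PR and the `quality_control` app, so verify them against your server's `/api/docs` schema; the server URL, credentials, and task ID are placeholders.

```python
import requests

# Assumptions (verify against your deployment's /api/docs):
# - the quality_control app serves GET/PATCH /api/quality/settings
# - settings objects can be filtered by task_id and carry a
#   max_validations_per_job field
BASE_URL = "http://localhost:8080"  # placeholder CVAT instance
AUTH = ("admin", "admin-password")  # placeholder credentials
TASK_ID = 42                        # placeholder task

with requests.Session() as session:
    session.auth = AUTH

    # Look up the quality settings object that belongs to the task.
    resp = session.get(
        f"{BASE_URL}/api/quality/settings", params={"task_id": TASK_ID}
    )
    resp.raise_for_status()
    settings = resp.json()["results"][0]

    # Allow up to 3 validation attempts per assignee, mirroring
    # step 2 of the list above.
    resp = session.patch(
        f"{BASE_URL}/api/quality/settings/{settings['id']}",
        json={"max_validations_per_job": 3},
    )
    resp.raise_for_status()
    print(resp.json()["max_validations_per_job"])
```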
+ +Read more about this functionality in the +{{< ilink "/docs/enterprise/immediate-feedback" "Immediate Feedback" >}} section. + +## How to check task quality metrics + +1. Open the task **Actions** menu > **Quality control** +2. (Optional) Request quality metrics computation and wait for completion +3. Review summaries or detailed reports + + ![Quality Analytics page](/images/honeypot05.jpg) + +Read more about this functionality +{{< ilink "/docs/manual/advanced/analytics-and-monitoring/auto-qa#quality-analytics" "here" >}}. + +## How to review problems found + +1. Open the task **Actions** menu > **Quality control** +2. Find an annotation job to be reviewed; it must have at least 1 validation frame +3. Click the job link +4. Switch to the **Review** mode +5. Enable display of Ground Truth annotations and conflicts + + ![GT conflict](/images/honeypot06.gif) + +Read more about this functionality +{{< ilink "/docs/manual/advanced/analytics-and-monitoring/auto-qa#reviewing-gt-conflicts" "here" >}}. diff --git a/site/content/en/docs/manual/basics/registration.md b/site/content/en/docs/manual/basics/registration.md index a56876d79554..8d3a282b1ae7 100644 --- a/site/content/en/docs/manual/basics/registration.md +++ b/site/content/en/docs/manual/basics/registration.md @@ -40,7 +40,7 @@ To register, do the following:
      A username generates from the email automatically. You can edit it if needed. -![Usernname generation](/images/filling_email.gif) +![Username generation](/images/filling_email.gif) ## User registration with social accounts diff --git a/site/content/en/images/brushing_tool_menu.png b/site/content/en/images/brushing_tool_menu.png index f5d6726d1d17..418f58f6ba36 100644 Binary files a/site/content/en/images/brushing_tool_menu.png and b/site/content/en/images/brushing_tool_menu.png differ diff --git a/site/content/en/images/brushing_tools_hide.png b/site/content/en/images/brushing_tools_hide.png new file mode 100644 index 000000000000..e9d7ba5552bd Binary files /dev/null and b/site/content/en/images/brushing_tools_hide.png differ diff --git a/site/content/en/images/honeypot09.jpg b/site/content/en/images/honeypot09.jpg new file mode 100644 index 000000000000..95a53f3fe3ac Binary files /dev/null and b/site/content/en/images/honeypot09.jpg differ diff --git a/site/content/en/images/honeypot10.jpg b/site/content/en/images/honeypot10.jpg new file mode 100644 index 000000000000..e4917324a5ef Binary files /dev/null and b/site/content/en/images/honeypot10.jpg differ diff --git a/site/content/en/images/honeypot11.jpg b/site/content/en/images/honeypot11.jpg new file mode 100644 index 000000000000..53eb9e58ff1b Binary files /dev/null and b/site/content/en/images/honeypot11.jpg differ diff --git a/site/content/en/images/honeypot12.jpg b/site/content/en/images/honeypot12.jpg new file mode 100644 index 000000000000..a018e4749d95 Binary files /dev/null and b/site/content/en/images/honeypot12.jpg differ diff --git a/site/content/en/images/immediate-feedback-quality-settings.png b/site/content/en/images/immediate-feedback-quality-settings.png index 26c934e32026..a56c1d40bd1a 100644 Binary files a/site/content/en/images/immediate-feedback-quality-settings.png and b/site/content/en/images/immediate-feedback-quality-settings.png differ diff --git a/site/content/en/images/quality_comparison_bbox1.svg b/site/content/en/images/quality_comparison_bbox1.svg new file mode 100644 index 000000000000..2bef3d53e22e --- /dev/null +++ b/site/content/en/images/quality_comparison_bbox1.svg @@ -0,0 +1,80 @@ + + + + + + + + + + + Intersection + Union + + IoU = + + diff --git a/site/content/en/images/quality_comparison_polylines1.png b/site/content/en/images/quality_comparison_polylines1.png new file mode 100644 index 000000000000..fca37af1283e Binary files /dev/null and b/site/content/en/images/quality_comparison_polylines1.png differ diff --git a/site/content/en/images/quality_comparison_skeleton1.svg b/site/content/en/images/quality_comparison_skeleton1.svg new file mode 100644 index 000000000000..ed3fa0cdcf88 --- /dev/null +++ b/site/content/en/images/quality_comparison_skeleton1.svg @@ -0,0 +1,275 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ~5% of the side,used asa point radius + + diff --git a/site/content/en/images/quality_download_report.png b/site/content/en/images/quality_download_report.png new file mode 100644 index 000000000000..3015d632f5ef Binary files /dev/null and b/site/content/en/images/quality_download_report.png differ diff --git a/site/content/en/images/sam2_tracker_run_action.png b/site/content/en/images/sam2_tracker_run_action.png new file mode 100644 index 000000000000..05d7222699be Binary files /dev/null and b/site/content/en/images/sam2_tracker_run_action.png differ diff --git 
a/site/content/en/images/sam2_tracker_run_action_modal.png b/site/content/en/images/sam2_tracker_run_action_modal.png new file mode 100644 index 000000000000..689dc8b1e1fb Binary files /dev/null and b/site/content/en/images/sam2_tracker_run_action_modal.png differ diff --git a/site/content/en/images/sam2_tracker_run_action_modal_progress.png b/site/content/en/images/sam2_tracker_run_action_modal_progress.png new file mode 100644 index 000000000000..f2fddc89f978 Binary files /dev/null and b/site/content/en/images/sam2_tracker_run_action_modal_progress.png differ diff --git a/site/content/en/images/sam2_tracker_run_shape_action.png b/site/content/en/images/sam2_tracker_run_shape_action.png new file mode 100644 index 000000000000..c13021c3b799 Binary files /dev/null and b/site/content/en/images/sam2_tracker_run_shape_action.png differ diff --git a/site/content/en/images/sam2_tracker_run_shape_action_modal.png b/site/content/en/images/sam2_tracker_run_shape_action_modal.png new file mode 100644 index 000000000000..daeb2501ab52 Binary files /dev/null and b/site/content/en/images/sam2_tracker_run_shape_action_modal.png differ diff --git a/site/content/en/images/sam2_tracker_run_shape_action_modal_progress.png b/site/content/en/images/sam2_tracker_run_shape_action_modal_progress.png new file mode 100644 index 000000000000..c590a872a634 Binary files /dev/null and b/site/content/en/images/sam2_tracker_run_shape_action_modal_progress.png differ diff --git a/site/process_sdk_docs.py b/site/process_sdk_docs.py index 3b1941248410..4fb911b69718 100755 --- a/site/process_sdk_docs.py +++ b/site/process_sdk_docs.py @@ -12,26 +12,24 @@ import sys import textwrap from glob import iglob -from typing import Callable, List +from typing import Callable from inflection import underscore class Processor: - _reference_files: List[str] + _reference_files: list[str] def __init__(self, *, input_dir: str, site_root: str) -> None: self._input_dir = input_dir self._site_root = site_root self._content_dir = osp.join(self._site_root, "content") - self._sdk_reference_dir = osp.join( - self._content_dir, "en/docs/api_sdk/sdk/reference" - ) + self._sdk_reference_dir = osp.join(self._content_dir, "en/docs/api_sdk/sdk/reference") self._templates_dir = osp.join(self._site_root, "templates") @staticmethod - def _copy_files(src_dir: str, glob_pattern: str, dst_dir: str) -> List[str]: + def _copy_files(src_dir: str, glob_pattern: str, dst_dir: str) -> list[str]: copied_files = [] for src_path in iglob(osp.join(src_dir, glob_pattern), recursive=True): @@ -97,9 +95,7 @@ def _move_api_summary(self): apis_index_filename = osp.join( osp.relpath(self._sdk_reference_dir, self._content_dir), "apis/_index.md" ) - apis_index_path = osp.join( - self._templates_dir, apis_index_filename + ".template" - ) + apis_index_path = osp.join(self._templates_dir, apis_index_filename + ".template") with open(apis_index_path) as f: contents = f.read() @@ -126,9 +122,7 @@ def _fix_page_links_and_references(self): os.rename(src_path, dst_path) mapping[src_filename] = dst_filename - self._reference_files = [ - osp.join(self._sdk_reference_dir, p) for p in mapping.values() - ] + self._reference_files = [osp.join(self._sdk_reference_dir, p) for p in mapping.values()] for p in iglob(self._sdk_reference_dir + "/**/*.md", recursive=True): with open(p) as f: @@ -146,9 +140,7 @@ def _fix_page_links_and_references(self): with open(p, "w") as f: f.write(contents) - def _process_non_code_blocks( - self, text: str, handlers: List[Callable[[str], str]] - ) -> str: + def 
_process_non_code_blocks(self, text: str, handlers: list[Callable[[str], str]]) -> str: """ Allows to process Markdown documents with passed callbacks. Callbacks are only executed outside code blocks. diff --git a/site/requirements.txt b/site/requirements.txt index e240c7a0f90e..10db0c33a9b0 100644 --- a/site/requirements.txt +++ b/site/requirements.txt @@ -1,5 +1,4 @@ gitpython inflection >= 0.5.1 -isort>=5.10.1 packaging toml diff --git a/supervisord/utils.conf b/supervisord/utils.conf index 1271e6eef536..dc7030023c35 100644 --- a/supervisord/utils.conf +++ b/supervisord/utils.conf @@ -26,19 +26,10 @@ environment=VECTOR_EVENT_HANDLER="SynchronousLogstashHandler" numprocs=1 autorestart=true -[program:rqworker-notifications] +[program:rqworker] command=%(ENV_HOME)s/wait_for_deps.sh - python3 %(ENV_HOME)s/manage.py rqworker -v 3 notifications + python3 %(ENV_HOME)s/manage.py rqworker -v 3 notifications cleaning --worker-class cvat.rqworker.DefaultWorker -environment=VECTOR_EVENT_HANDLER="SynchronousLogstashHandler",CVAT_POSTGRES_APPLICATION_NAME="cvat:worker:notifications" +environment=VECTOR_EVENT_HANDLER="SynchronousLogstashHandler",CVAT_POSTGRES_APPLICATION_NAME="cvat:worker:notifications+cleaning" numprocs=%(ENV_NUMPROCS)s autorestart=true - -[program:rqworker-cleaning] -command=%(ENV_HOME)s/wait_for_deps.sh - python3 %(ENV_HOME)s/manage.py rqworker -v 3 cleaning - --worker-class cvat.rqworker.DefaultWorker -environment=VECTOR_EVENT_HANDLER="SynchronousLogstashHandler",CVAT_POSTGRES_APPLICATION_NAME="cvat:worker:cleaning" -numprocs=%(ENV_NUMPROCS)s -process_name=%(program_name)s-%(process_num)d -autorestart=true diff --git a/supervisord/worker.chunks.conf b/supervisord/worker.chunks.conf new file mode 100644 index 000000000000..9eccd41e8cba --- /dev/null +++ b/supervisord/worker.chunks.conf @@ -0,0 +1,29 @@ +[unix_http_server] +file = /tmp/supervisord/supervisor.sock + +[supervisorctl] +serverurl = unix:///tmp/supervisord/supervisor.sock + + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[supervisord] +nodaemon=true +logfile=%(ENV_HOME)s/logs/supervisord.log ; supervisord log file +logfile_maxbytes=50MB ; maximum size of logfile before rotation +logfile_backups=10 ; number of backed up logfiles +loglevel=debug ; info, debug, warn, trace +pidfile=/tmp/supervisord/supervisord.pid ; pidfile location + +[program:rqworker-chunks] +command=%(ENV_HOME)s/wait_for_deps.sh + python3 %(ENV_HOME)s/manage.py rqworker -v 3 chunks + --worker-class cvat.rqworker.DefaultWorker +environment=VECTOR_EVENT_HANDLER="SynchronousLogstashHandler",CVAT_POSTGRES_APPLICATION_NAME="cvat:worker:chunks" +numprocs=%(ENV_NUMPROCS)s +process_name=%(program_name)s-%(process_num)d +autorestart=true + +[program:smokescreen] +command=smokescreen --listen-ip=127.0.0.1 %(ENV_SMOKESCREEN_OPTS)s diff --git a/tests/cypress.config.js b/tests/cypress.config.js index da1f20d8dee1..17f157c59c8d 100644 --- a/tests/cypress.config.js +++ b/tests/cypress.config.js @@ -1,3 +1,7 @@ +// Copyright (C) 2024 CVAT.ai Corporation +// +// SPDX-License-Identifier: MIT + const { defineConfig } = require('cypress'); const baseConfig = require('./cypress.base.config'); diff --git a/tests/cypress/e2e/actions_objects/case_99_save_filtered_object_in_AAM.js b/tests/cypress/e2e/actions_objects/case_99_save_filtered_object_in_AAM.js index 6f72aaf6219f..2e80ff356b5c 100644 --- a/tests/cypress/e2e/actions_objects/case_99_save_filtered_object_in_AAM.js +++ 
b/tests/cypress/e2e/actions_objects/case_99_save_filtered_object_in_AAM.js @@ -40,7 +40,7 @@ context('Save filtered object in AAM.', () => { }); describe(`Testing case "${caseId}"`, () => { - it(`Set filter label == “${labelName}”.`, () => { + it(`Set filter label == "${labelName}".`, () => { cy.addFiltersRule(0); cy.setFilter({ groupIndex: 0, diff --git a/tests/cypress/e2e/actions_objects/regression_tests.js b/tests/cypress/e2e/actions_objects/regression_tests.js index 8cf00b90e0c1..7bf11c7b0d7b 100644 --- a/tests/cypress/e2e/actions_objects/regression_tests.js +++ b/tests/cypress/e2e/actions_objects/regression_tests.js @@ -9,9 +9,9 @@ context('Regression tests', () => { let jobID = null; const taskPayload = { - name: 'Test annotations actions', + name: 'Regression tests', labels: [{ - name: 'label 1', + name: 'car', attributes: [], type: 'any', }], @@ -29,12 +29,9 @@ context('Regression tests', () => { }; const rectanglePayload = { - frame: 99, - objectType: 'shape', shapeType: 'rectangle', - points: [250, 64, 491, 228], occluded: false, - labelName: 'label 1', + labelName: taskPayload.labels[0].name, }; before(() => { @@ -45,41 +42,65 @@ context('Regression tests', () => { taskID = response.taskID; [jobID] = response.jobIDs; - cy.headlessCreateObjects([rectanglePayload], jobID); - cy.visit(`/tasks/${taskID}/jobs/${jobID}`); + cy.headlessCreateObjects([ + { + ...rectanglePayload, frame: 99, points: [250, 64, 491, 228], objectType: 'shape', + }, + { + ...rectanglePayload, frame: 0, points: [10, 10, 30, 30], objectType: 'track', + }, + ], jobID); }); }); - describe('Regression tests', () => { + describe('UI does not crash', () => { + beforeEach(() => { + cy.visit(`/tasks/${taskID}/jobs/${jobID}`); + cy.get('.cvat-canvas-container').should('not.exist'); + cy.get('.cvat-canvas-container').should('exist').and('be.visible'); + }); + it('UI does not crash if to activate an object while frame fetching', () => { - cy.reload(); cy.intercept('GET', '/api/jobs/**/data?**', (req) => { req.continue((res) => { - res.setDelay(1000); + res.setDelay(3000); }); }).as('delayedRequest'); + cy.get('.cvat-player-last-button').click(); - cy.get('#cvat_canvas_shape_1').trigger('mousemove'); - cy.get('#cvat_canvas_shape_1').should('not.have.class', 'cvat_canvas_shape_activated'); + cy.get('#cvat-objects-sidebar-state-item-1').trigger('mousemove'); + cy.get('#cvat-objects-sidebar-state-item-1').should('not.have.class', 'cvat-objects-sidebar-state-active-item'); cy.wait('@delayedRequest'); cy.get('#cvat_canvas_shape_1').trigger('mousemove'); cy.get('#cvat_canvas_shape_1').should('have.class', 'cvat_canvas_shape_activated'); }); + + it('UI does not crash if to navigate during an element resizing (issue 1922)', { scrollBehavior: false }, () => { + cy.get('#cvat_canvas_shape_2').then(([el]) => { + const rect = el.getBoundingClientRect(); + + cy.get('body').trigger('mousemove', rect.x + rect.width / 2, rect.y + rect.height / 2); + cy.get('#cvat_canvas_shape_2').should('have.class', 'cvat_canvas_shape_activated'); + + cy.get('body').trigger('mousedown', rect.right, rect.bottom, { button: 0 }); + cy.get('body').trigger('mousemove', rect.right + 100, rect.bottom + 100); + + cy.get('body').type('f'); // go to next frame + cy.get('body').trigger('mouseup'); + + // Page with the error is missing + cy.get('.cvat-global-boundary').should('not.exist'); + cy.checkFrameNum(0); + }); + }); }); after(() => { + if (taskID !== null) { + cy.headlessDeleteTask(taskID); + } cy.logout(); - cy.getAuthKey().then((response) => { - const 
authKey = response.body.key; - cy.request({ - method: 'DELETE', - url: `/api/tasks/${taskID}`, - headers: { - Authorization: `Token ${authKey}`, - }, - }); - }); }); }); diff --git a/tests/cypress/e2e/actions_objects2/case_16_z_order_features.js b/tests/cypress/e2e/actions_objects2/case_16_z_order_features.js index cb39d1bcd6c9..745af4157ba5 100644 --- a/tests/cypress/e2e/actions_objects2/case_16_z_order_features.js +++ b/tests/cypress/e2e/actions_objects2/case_16_z_order_features.js @@ -63,7 +63,7 @@ context('Actions on polygon', () => { cy.get('.cvat-canvas-container').click(); }); - it('Second shape is over the first shape', () => { + it('Second shape is over the first shape', () => { // The larger the index of an element in the array the closer it is to us cy.get('.cvat_canvas_shape').then(($canvasShape) => { expect(Number($canvasShape[1].id.match(/\d+$/))).to.be.equal(2); @@ -76,7 +76,7 @@ context('Actions on polygon', () => { cy.get('#cvat_canvas_shape_1').should('have.class', 'cvat_canvas_shape_activated'); }); - it('First shape is over the second shape', () => { + it('First shape is over the second shape', () => { // The larger the index of an element in the array the closer it is to us cy.get('.cvat_canvas_shape').then(($canvasShape) => { expect(Number($canvasShape[1].id.match(/\d+$/))).to.be.equal(1); diff --git a/tests/cypress/e2e/actions_objects2/case_17_lock_hide_features.js b/tests/cypress/e2e/actions_objects2/case_17_lock_hide_features.js index 855d452bbddc..97fcf850b984 100644 --- a/tests/cypress/e2e/actions_objects2/case_17_lock_hide_features.js +++ b/tests/cypress/e2e/actions_objects2/case_17_lock_hide_features.js @@ -218,7 +218,7 @@ context('Lock/hide features.', () => { cy.contains('Labels').click(); }); }); - it('Repeat hide/lock for one of the labels. Objects with other labels weren’t affected.', () => { + it("Repeat hide/lock for one of the labels. Objects with other labels weren't affected.", () => { const objectsSameLabel = ['cvat_canvas_shape_1', 'cvat_canvas_shape_2', 'cvat_canvas_shape_3']; cy.get('.cvat-objects-sidebar-labels-list').within(() => { // Hide and lock all object with "Main task" label (#cvat_canvas_shape_1-3). 
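The regression_tests.js refactor a few hunks above replaces fully spelled-out object literals with one shared base payload plus per-object overrides via the spread operator. For readers more at home in this repository's Python test suites, below is a rough Python analog of the same pattern using dict unpacking; the payload keys mirror the Cypress helper's payloads and are illustrative only, not an actual CVAT API.

```python
# A rough Python analog of the shared-payload pattern from the
# regression_tests.js refactor: keep common fields in one base dict
# and unpack it into per-object variants.
# The keys mirror the Cypress payloads and are illustrative only.
rectangle_payload = {
    "shapeType": "rectangle",
    "occluded": False,
    "labelName": "car",
}

objects = [
    # a shape on the last frame
    {**rectangle_payload, "frame": 99,
     "points": [250, 64, 491, 228], "objectType": "shape"},
    # a track starting on the first frame
    {**rectangle_payload, "frame": 0,
     "points": [10, 10, 30, 30], "objectType": "track"},
]

# Overrides win over base fields, while shared fields stay in sync.
assert all(obj["shapeType"] == "rectangle" for obj in objects)
```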
diff --git a/tests/cypress/e2e/actions_tasks3/case_44_changing_default_value_for_attribute.js b/tests/cypress/e2e/actions_tasks3/case_44_changing_default_value_for_attribute.js index fd641be141c0..48227db05641 100644 --- a/tests/cypress/e2e/actions_tasks3/case_44_changing_default_value_for_attribute.js +++ b/tests/cypress/e2e/actions_tasks3/case_44_changing_default_value_for_attribute.js @@ -33,7 +33,7 @@ context('Changing a default value for an attribute.', () => { }); describe(`Testing case "${caseId}", issue 2968`, () => { - it('Add a label, add text (leave it’s value empty by default) & checkbox attributes.', () => { + it('Add a label, add text (leave its value empty by default) & checkbox attributes.', () => { cy.intercept('PATCH', '/api/tasks/**').as('patchTask'); cy.addNewLabel({ name: additionalLabel }, additionalAttrsLabel); cy.wait('@patchTask').its('response.statusCode').should('equal', 200); diff --git a/tests/cypress/e2e/features/annotations_actions.js b/tests/cypress/e2e/features/annotations_actions.js index cda91f9c33ba..55fe7542c680 100644 --- a/tests/cypress/e2e/features/annotations_actions.js +++ b/tests/cypress/e2e/features/annotations_actions.js @@ -86,47 +86,6 @@ context('Testing annotations actions workflow', () => { cy.closeAnnotationsActionsModal(); }); - - it('Recommendation to save the job appears if there are unsaved changes', () => { - cy.createRectangle({ - points: 'By 2 Points', - type: 'Shape', - labelName: taskPayload.labels[0].name, - firstX: 250, - firstY: 350, - secondX: 350, - secondY: 450, - }); - - cy.openAnnotationsActionsModal(); - cy.intercept(`/api/jobs/${jobID}/annotations?**action=create**`).as('createAnnotationsRequest'); - cy.get('.cvat-action-runner-save-job-recommendation').should('exist').and('be.visible').click(); - cy.wait('@createAnnotationsRequest').its('response.statusCode').should('equal', 200); - cy.get('.cvat-action-runner-save-job-recommendation').should('not.exist'); - - cy.closeAnnotationsActionsModal(); - }); - - it('Recommendation to disable automatic saving appears in modal if automatic saving is enabled', () => { - cy.openSettings(); - cy.contains('Workspace').click(); - cy.get('.cvat-workspace-settings-auto-save').within(() => { - cy.get('[type="checkbox"]').check(); - }); - cy.closeSettings(); - - cy.openAnnotationsActionsModal(); - cy.get('.cvat-action-runner-disable-autosave-recommendation').should('exist').and('be.visible').click(); - cy.get('.cvat-action-runner-disable-autosave-recommendation').should('not.exist'); - cy.closeAnnotationsActionsModal(); - - cy.openSettings(); - cy.contains('Workspace').click(); - cy.get('.cvat-workspace-settings-auto-save').within(() => { - cy.get('[type="checkbox"]').should('not.be.checked'); - }); - cy.closeSettings(); - }); }); describe('Test action: "Remove filtered shapes"', () => { @@ -374,7 +333,7 @@ context('Testing annotations actions workflow', () => { cy.goCheckFrameNumber(latestFrameNumber); cy.get('.cvat_canvas_shape').should('have.length', 1); - cy.saveJob('PUT', 200, 'saveJob'); + cy.saveJob('PATCH', 200, 'saveJob'); const exportAnnotation = { as: 'exportAnnotations', type: 'annotations', diff --git a/tests/cypress/e2e/features/ground_truth_jobs.js b/tests/cypress/e2e/features/ground_truth_jobs.js index 0753d59839cc..482a940c3d68 100644 --- a/tests/cypress/e2e/features/ground_truth_jobs.js +++ b/tests/cypress/e2e/features/ground_truth_jobs.js @@ -4,12 +4,11 @@ /// +import { defaultTaskSpec } from '../../support/default-specs'; + context('Ground truth jobs', () => { - const 
caseId = 'Ground truth jobs'; const labelName = 'car'; - const taskName = `Annotation task for Case ${caseId}`; - const attrName = `Attr for Case ${caseId}`; - const textDefaultValue = 'Some default value for type Text'; + const taskName = 'Annotation task for Ground truth jobs'; const jobOptions = { jobType: 'Ground truth', @@ -17,6 +16,12 @@ context('Ground truth jobs', () => { fromTaskPage: true, }; + const defaultValidationParams = { + frameCount: 3, + mode: 'gt', + frameSelectionMethod: 'random_uniform', + }; + const groundTruthRectangles = [ { id: 1, @@ -64,8 +69,8 @@ context('Ground truth jobs', () => { let jobID = null; let taskID = null; - // With seed = 1, frameCount = 4, totalFrames = 10 - predifined ground truth frames are: - const groundTruthFrames = [0, 1, 5, 6]; + // With seed = 1, frameCount = 4, totalFrames = 100 - predifined ground truth frames are: + const groundTruthFrames = [10, 23, 71, 87]; function checkRectangleAndObjectMenu(rectangle, isGroundTruthJob = false) { if (isGroundTruthJob) { @@ -97,36 +102,33 @@ context('Ground truth jobs', () => { cy.get('.cvat-quality-control-management-tab').should('exist').and('be.visible'); } + function createAndOpenTask(serverFiles, validationParams = null) { + const { taskSpec, dataSpec, extras } = defaultTaskSpec({ + taskName, serverFiles, labelName, validationParams, + }); + return cy.headlessCreateTask(taskSpec, dataSpec, extras).then((taskResponse) => { + taskID = taskResponse.taskID; + if (validationParams) { + [groundTruthJobID, jobID] = taskResponse.jobIDs; + } else { + [jobID] = taskResponse.jobIDs; + } + }).then(() => { + cy.visit(`/tasks/${taskID}`); + cy.get('.cvat-task-details').should('exist').and('be.visible'); + }); + } + before(() => { cy.visit('auth/login'); cy.login(); }); describe('Testing ground truth basics', () => { - const imagesCount = 10; - const imageFileName = 'ground_truth_1'; - const width = 800; - const height = 800; - const posX = 10; - const posY = 10; - const color = 'gray'; - const archiveName = `${imageFileName}.zip`; - const archivePath = `cypress/fixtures/${archiveName}`; - const imagesFolder = `cypress/fixtures/${imageFileName}`; - const directoryToArchive = imagesFolder; + const serverFiles = ['bigArchive.zip']; before(() => { - cy.visit('/tasks'); - cy.imageGenerator(imagesFolder, imageFileName, width, height, color, posX, posY, labelName, imagesCount); - cy.createZipArchive(directoryToArchive, archivePath); - cy.createAnnotationTask(taskName, labelName, attrName, textDefaultValue, archiveName); - cy.openTask(taskName); - cy.url().then((url) => { - taskID = Number(url.split('/').slice(-1)[0].split('?')[0]); - }); - cy.get('.cvat-job-item').first().invoke('attr', 'data-row-id').then((val) => { - jobID = val; - }); + createAndOpenTask(serverFiles); }); after(() => { @@ -196,35 +198,80 @@ context('Ground truth jobs', () => { }); }); + describe('Testing creating task with quality params', () => { + const imagesCount = 3; + const imageFileName = `image_${taskName.replace(' ', '_').toLowerCase()}`; + const width = 800; + const height = 800; + const posX = 10; + const posY = 10; + const color = 'gray'; + const archiveName = `${imageFileName}.zip`; + const archivePath = `cypress/fixtures/${archiveName}`; + const imagesFolder = `cypress/fixtures/${imageFileName}`; + const directoryToArchive = imagesFolder; + const attrName = 'gt_attr'; + const defaultAttrValue = 'GT attr'; + const multiAttrParams = false; + const forProject = false; + const attachToProject = false; + const projectName = null; + const 
expectedResult = 'success'; + const projectSubsetFieldValue = null; + const advancedConfigurationParams = false; + + before(() => { + cy.contains('.cvat-header-button', 'Tasks').should('be.visible').click(); + cy.url().should('include', '/tasks'); + cy.imageGenerator(imagesFolder, imageFileName, width, height, color, posX, posY, labelName, imagesCount); + cy.createZipArchive(directoryToArchive, archivePath); + }); + + afterEach(() => { + cy.goToTaskList(); + cy.deleteTask(taskName); + }); + + function createTaskWithQualityParams(qualityParams) { + cy.createAnnotationTask( + taskName, + labelName, + attrName, + defaultAttrValue, + archiveName, + multiAttrParams, + advancedConfigurationParams, + forProject, + attachToProject, + projectName, + expectedResult, + projectSubsetFieldValue, + qualityParams, + ); + cy.openTask(taskName); + cy.get('.cvat-job-item').first() + .find('.ant-tag') + .should('have.text', 'Ground truth'); + } + + it('Create task with ground truth job', () => { + createTaskWithQualityParams({ + validationMode: 'Ground Truth', + }); + }); + + it('Create task with honeypots', () => { + createTaskWithQualityParams({ + validationMode: 'Honeypots', + }); + }); + }); + describe('Testing ground truth management basics', () => { const serverFiles = ['images/image_1.jpg', 'images/image_2.jpg', 'images/image_3.jpg']; before(() => { - cy.headlessCreateTask({ - labels: [{ name: labelName, attributes: [], type: 'any' }], - name: taskName, - project_id: null, - source_storage: { location: 'local' }, - target_storage: { location: 'local' }, - }, { - server_files: serverFiles, - image_quality: 70, - use_zip_chunks: true, - use_cache: true, - sorting_method: 'lexicographical', - }).then((taskResponse) => { - taskID = taskResponse.taskID; - [jobID] = taskResponse.jobIDs; - }).then(() => ( - cy.headlessCreateJob({ - task_id: taskID, - frame_count: 3, - type: 'ground_truth', - frame_selection_method: 'random_uniform', - }) - )).then((jobResponse) => { - groundTruthJobID = jobResponse.jobID; - }).then(() => { + createAndOpenTask(serverFiles, defaultValidationParams).then(() => { cy.visit(`/tasks/${taskID}/quality-control#management`); cy.get('.cvat-quality-control-management-tab').should('exist').and('be.visible'); cy.get('.cvat-annotations-quality-allocation-table-summary').should('exist').and('be.visible'); @@ -312,35 +359,10 @@ context('Ground truth jobs', () => { }); describe('Regression tests', () => { - const imagesCount = 20; - const imageFileName = 'ground_truth_2'; - const width = 100; - const height = 100; - const posX = 10; - const posY = 10; - const color = 'gray'; - const archiveName = `${imageFileName}.zip`; - const archivePath = `cypress/fixtures/${archiveName}`; - const imagesFolder = `cypress/fixtures/${imageFileName}`; - const directoryToArchive = imagesFolder; + const serverFiles = ['bigArchive.zip']; - before(() => { - cy.visit('/tasks'); - cy.imageGenerator(imagesFolder, imageFileName, width, height, color, posX, posY, labelName, imagesCount); - cy.createZipArchive(directoryToArchive, archivePath); - cy.createAnnotationTask( - taskName, - labelName, - attrName, - textDefaultValue, - archiveName, - false, - { multiJobs: true, segmentSize: 1 }, - ); - cy.openTask(taskName); - cy.url().then((url) => { - taskID = Number(url.split('/').slice(-1)[0].split('?')[0]); - }); + beforeEach(() => { + createAndOpenTask(serverFiles); }); afterEach(() => { @@ -378,5 +400,51 @@ context('Ground truth jobs', () => { jobID = Number(url.split('/').slice(-1)[0].split('?')[0]); }).should('match', 
/\/tasks\/\d+\/jobs\/\d+/); }); + + it('Check GT annotations can not be shown in standard annotation view', () => { + cy.headlessCreateJob({ + task_id: taskID, + frame_count: 4, + type: 'ground_truth', + frame_selection_method: 'random_uniform', + seed: 1, + }).then((jobResponse) => { + groundTruthJobID = jobResponse.jobID; + return cy.headlessCreateObjects(groundTruthFrames.map((frame, index) => { + const gtRect = groundTruthRectangles[index]; + return { + labelName, + objectType: 'shape', + shapeType: 'rectangle', + occluded: false, + frame, + points: [gtRect.firstX, gtRect.firstY, gtRect.secondX, gtRect.secondY], + }; + }), groundTruthJobID); + }).then(() => { + cy.visit(`/tasks/${taskID}/jobs/${jobID}`); + cy.get('.cvat-canvas-container').should('exist'); + + cy.changeWorkspace('Review'); + cy.get('.cvat-objects-sidebar-show-ground-truth').click(); + cy.get('.cvat-objects-sidebar-show-ground-truth').should( + 'have.class', 'cvat-objects-sidebar-show-ground-truth-active', + ); + groundTruthFrames.forEach((frame, index) => { + cy.goCheckFrameNumber(frame); + checkRectangleAndObjectMenu(groundTruthRectangles[index]); + }); + + cy.interactMenu('Open the task'); + cy.get('.cvat-task-job-list').within(() => { + cy.contains('a', `Job #${jobID}`).click(); + }); + groundTruthFrames.forEach((frame) => { + cy.goCheckFrameNumber(frame); + cy.get('.cvat_canvas_shape').should('not.exist'); + cy.get('.cvat-objects-sidebar-state-item').should('not.exist'); + }); + }); + }); }); }); diff --git a/tests/cypress/e2e/features/masks_basics.js b/tests/cypress/e2e/features/masks_basics.js index b39d6ea769d7..ac7a1358e231 100644 --- a/tests/cypress/e2e/features/masks_basics.js +++ b/tests/cypress/e2e/features/masks_basics.js @@ -156,6 +156,12 @@ context('Manipulations with masks', { scrollBehavior: false }, () => { cy.interactAnnotationObjectMenu('#cvat-objects-sidebar-state-item-1', 'Edit'); cy.drawMask(editingActions); + + // Check issue fixed in https://github.com/cvat-ai/cvat/pull/8598 + // Frames navigation should not work during editing + cy.get('.cvat-player-next-button').click(); + cy.checkFrameNum(0); + cy.finishMaskDrawing(); }); @@ -220,6 +226,59 @@ context('Manipulations with masks', { scrollBehavior: false }, () => { cy.get('body').type('n'); cy.get('.cvat-brush-tools-toolbox').should('not.be.visible'); }); + + it('Check hide mask feature', () => { + function checkHideFeature() { + cy.get('.cvat-brush-tools-hide').click(); + cy.get('.cvat-brush-tools-hide').should('have.class', 'cvat-brush-tools-active-tool'); + cy.get('.cvat_masks_canvas_wrapper').should('not.be.visible'); + cy.get('.cvat-brush-tools-hide').click(); + cy.get('.cvat_masks_canvas_wrapper').should('be.visible'); + } + + function checkHideShortcut() { + cy.get('body').type('h'); + cy.get('.cvat-brush-tools-hide').should('have.class', 'cvat-brush-tools-active-tool'); + cy.get('.cvat_masks_canvas_wrapper').should('not.be.visible'); + } + + function checkObjectIsHidden() { + cy.get('#cvat-objects-sidebar-state-item-1').within(() => { + cy.get('.cvat-object-item-button-hidden-enabled').should('exist'); + }); + } + + const mask = [{ + method: 'brush', + coordinates: [[450, 250], [600, 400]], + }]; + const drawPolygon = [{ + method: 'polygon-plus', + coordinates: [[450, 210], [650, 400], [450, 600], [260, 400]], + }]; + cy.startMaskDrawing(); + cy.drawMask(mask); + + checkHideFeature(); + checkHideShortcut(); + + cy.finishMaskDrawing(); + cy.get('#cvat_canvas_shape_1').should('be.visible'); + + 
cy.interactAnnotationObjectMenu('#cvat-objects-sidebar-state-item-1', 'Edit'); + checkHideFeature(); + + cy.drawMask(drawPolygon); + checkHideShortcut(); + cy.get('.cvat_canvas_shape_drawing') + .invoke('attr', 'fill-opacity') + .then((opacity) => expect(+opacity).to.be.equal(0)); + checkObjectIsHidden(); + cy.get('.cvat-brush-tools-brush').click(); + cy.get('.cvat-brush-tools-brush').should('have.class', 'cvat-brush-tools-active-tool'); + cy.finishMaskDrawing(); + checkObjectIsHidden(); + }); }); describe('Tests to make sure that empty masks cannot be created', () => { diff --git a/tests/cypress/e2e/features/requests_page.js b/tests/cypress/e2e/features/requests_page.js index 3cdf187a9825..f78554986356 100644 --- a/tests/cypress/e2e/features/requests_page.js +++ b/tests/cypress/e2e/features/requests_page.js @@ -322,9 +322,11 @@ context('Requests page', () => { cy.getJobIDFromIdx(0).then((jobID) => { const closeExportNotification = () => { - cy.contains('Export is finished').should('be.visible'); - cy.contains('Export is finished').parents('.ant-notification-notice') - .find('span[aria-label="close"]').click(); + cy.get('.ant-notification-notice').first().within((notification) => { + cy.contains('Export is finished').should('be.visible'); + cy.get('span[aria-label="close"]').click(); + cy.wrap(notification).should('not.exist'); + }); }; const exportParams = { @@ -357,5 +359,24 @@ context('Requests page', () => { }); }); }); + + it('Export task. Request for status fails, UI is not crushing', () => { + cy.intercept('GET', '/api/requests/**', { + statusCode: 500, + body: 'Network error', + }); + + cy.exportTask({ + type: 'annotations', + format: exportFormat, + archiveCustomName: annotationsArchiveNameLocal, + }); + + cy.contains('Could not export dataset').should('be.visible'); + cy.closeNotification('.ant-notification-notice-error'); + + cy.contains('.cvat-header-button', 'Requests').should('be.visible').click(); + cy.get('.cvat-requests-page').should('be.visible'); + }); }); }); diff --git a/tests/cypress/e2e/issues_prs/issue_1922_error_canvas_is_busy_at_resize_element.js b/tests/cypress/e2e/issues_prs/issue_1922_error_canvas_is_busy_at_resize_element.js deleted file mode 100644 index 53b5606f457b..000000000000 --- a/tests/cypress/e2e/issues_prs/issue_1922_error_canvas_is_busy_at_resize_element.js +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2020-2022 Intel Corporation -// Copyright (C) 2023 CVAT.ai Corporation -// -// SPDX-License-Identifier: MIT - -/// - -import { taskName, labelName } from '../../support/const'; - -context('Check error canvas is busy at resize element', () => { - const issueId = '1922'; - const createRectangleShape2Points = { - points: 'By 2 Points', - type: 'Shape', - labelName, - firstX: 100, - firstY: 100, - secondX: 300, - secondY: 300, - }; - - before(() => { - cy.openTaskJob(taskName); - }); - - describe(`Testing issue "${issueId}"`, () => { - it('Create an object in first frame', () => { - cy.createRectangle(createRectangleShape2Points); - }); - - it('Go to next frame and create an object in second frame', () => { - cy.get('.cvat-player-next-button').click(); - cy.createRectangle(createRectangleShape2Points); - }); - - it('Switching mode of button on "back with a filter"', () => { - cy.get('.cvat-player-previous-button').rightclick(); - cy.get('.cvat-player-previous-filtered-inlined-button').click(); - }); - - it('Resize element on second frame and go to previous frame at resizing element', () => { - const { secondX, secondY } = createRectangleShape2Points; - 
cy.get('.cvat-canvas-container').trigger('mousemove', secondX - 10, secondY - 10); // activate second shape - cy.get('.cvat-canvas-container').trigger('mousedown', secondX, secondY, { button: 0 }); - cy.get('.cvat-canvas-container').trigger('mousemove', secondX + 100, secondY + 100); - cy.get('body').type('d'); // go to previous frame - cy.get('body').trigger('mouseup'); - }); - - it('Page with the error is missing', () => { - cy.get('.cvat-global-boundary').should('not.exist'); - }); - }); -}); diff --git a/tests/cypress/support/commands.js b/tests/cypress/support/commands.js index 76f7ffe2640c..9941a9b0d5c3 100644 --- a/tests/cypress/support/commands.js +++ b/tests/cypress/support/commands.js @@ -178,6 +178,7 @@ Cypress.Commands.add( projectName = '', expectedResult = 'success', projectSubsetFieldValue = 'Test', + qualityConfigurationParams = null, ) => { cy.url().then(() => { cy.get('.cvat-create-task-dropdown').click(); @@ -215,6 +216,9 @@ Cypress.Commands.add( if (advancedConfigurationParams) { cy.advancedConfiguration(advancedConfigurationParams); } + if (qualityConfigurationParams) { + cy.configureTaskQualityMode(qualityConfigurationParams); + } cy.get('.cvat-submit-continue-task-button').scrollIntoView(); cy.get('.cvat-submit-continue-task-button').click(); if (expectedResult === 'success') { @@ -291,7 +295,7 @@ Cypress.Commands.add('headlessCreateObjects', (objects, jobID) => { }); }); -Cypress.Commands.add('headlessCreateTask', (taskSpec, dataSpec) => { +Cypress.Commands.add('headlessCreateTask', (taskSpec, dataSpec, extras) => { cy.window().then(async ($win) => { const task = new $win.cvat.classes.Task({ ...taskSpec, @@ -310,7 +314,7 @@ Cypress.Commands.add('headlessCreateTask', (taskSpec, dataSpec) => { task.remoteFiles = dataSpec.remote_files; } - const result = await task.save(); + const result = await task.save(extras || {}); return cy.wrap({ taskID: result.id, jobIDs: result.jobs.map((job) => job.id) }); }); }); @@ -897,6 +901,23 @@ Cypress.Commands.add('advancedConfiguration', (advancedConfigurationParams) => { } }); +Cypress.Commands.add('configureTaskQualityMode', (qualityConfigurationParams) => { + cy.contains('Quality').click(); + if (qualityConfigurationParams.validationMode) { + cy.get('#validationMode').within(() => { + cy.contains(qualityConfigurationParams.validationMode).click(); + }); + } + if (qualityConfigurationParams.validationFramesPercent) { + cy.get('#validationFramesPercent').clear(); + cy.get('#validationFramesPercent').type(qualityConfigurationParams.validationFramesPercent); + } + if (qualityConfigurationParams.validationFramesPerJobPercent) { + cy.get('#validationFramesPerJobPercent').clear(); + cy.get('#validationFramesPerJobPercent').type(qualityConfigurationParams.validationFramesPerJobPercent); + } +}); + Cypress.Commands.add('removeAnnotations', () => { cy.contains('.cvat-annotation-header-button', 'Menu').click(); cy.get('.cvat-annotation-menu').within(() => { @@ -1272,7 +1293,7 @@ Cypress.Commands.add('exportTask', ({ cy.get('.cvat-cloud-storage-select-provider').click(); } } - cy.contains('button', 'OK').click(); + cy.contains('.cvat-modal-export-task button', 'OK').click(); cy.get('.cvat-notification-notice-export-task-start').should('be.visible'); cy.closeNotification('.cvat-notification-notice-export-task-start'); }); @@ -1565,6 +1586,7 @@ Cypress.Commands.add('startMaskDrawing', () => { Cypress.Commands.add('finishMaskDrawing', () => { cy.get('.cvat-brush-tools-brush').click(); cy.get('.cvat-brush-tools-finish').click(); + 
cy.hideTooltips(); }); Cypress.Commands.add('sliceShape', ( diff --git a/tests/cypress/support/default-specs.js b/tests/cypress/support/default-specs.js new file mode 100644 index 000000000000..5e59afff47d1 --- /dev/null +++ b/tests/cypress/support/default-specs.js @@ -0,0 +1,77 @@ +// Copyright (C) 2024 CVAT.ai Corporation +// +// SPDX-License-Identifier: MIT + +function defaultTaskSpec({ + labelName, + labelType, + taskName, + serverFiles, + startFrame, + frameFilter, + segmentSize, + validationParams, +}) { + const taskSpec = { + labels: [ + { name: labelName, attributes: [], type: labelType || 'any' }, + ], + name: taskName, + project_id: null, + source_storage: { location: 'local' }, + target_storage: { location: 'local' }, + }; + + if (segmentSize) { + taskSpec.segment_size = segmentSize; + } + + const dataSpec = { + server_files: serverFiles, + image_quality: 70, + use_zip_chunks: true, + use_cache: true, + sorting_method: (validationParams && validationParams.mode === 'gt_pool') ? 'random' : 'lexicographical', + }; + if (startFrame) { + dataSpec.start_frame = startFrame; + } + if (frameFilter) { + dataSpec.frame_filter = frameFilter; + } + + const extras = {}; + if (validationParams) { + const convertedParams = {}; + if (validationParams.frames) { + convertedParams.frames = validationParams.frames; + } + if (validationParams.frameSelectionMethod) { + convertedParams.frame_selection_method = validationParams.frameSelectionMethod; + } + if (validationParams.frameCount) { + convertedParams.frame_count = validationParams.frameCount; + } + if (validationParams.framesPerJobCount) { + convertedParams.frames_per_job_count = validationParams.framesPerJobCount; + } + if (validationParams.mode) { + convertedParams.mode = validationParams.mode; + } + if (validationParams.randomSeed) { + convertedParams.random_seed = validationParams.randomSeed; + } + + extras.validation_params = convertedParams; + } + + return { + taskSpec, + dataSpec, + extras, + }; +} + +module.exports = { + defaultTaskSpec, +}; diff --git a/tests/cypress_canvas3d.config.js b/tests/cypress_canvas3d.config.js index e1cd5ede69f2..f542fe78bde9 100644 --- a/tests/cypress_canvas3d.config.js +++ b/tests/cypress_canvas3d.config.js @@ -1,3 +1,7 @@ +// Copyright (C) 2024 CVAT.ai Corporation +// +// SPDX-License-Identifier: MIT + const { defineConfig } = require('cypress'); const baseConfig = require('./cypress.base.config'); diff --git a/tests/docker-compose.file_share.yml b/tests/docker-compose.file_share.yml index 3ceeb355f687..bca485ad48c8 100644 --- a/tests/docker-compose.file_share.yml +++ b/tests/docker-compose.file_share.yml @@ -5,3 +5,6 @@ services: cvat_server: volumes: - ./tests/mounted_file_share:/home/django/share:rw + cvat_worker_chunks: + volumes: + - ./tests/mounted_file_share:/home/django/share:rw diff --git a/tests/docker-compose.minio.yml b/tests/docker-compose.minio.yml index 6f82aadd1806..6089aa69f8bf 100644 --- a/tests/docker-compose.minio.yml +++ b/tests/docker-compose.minio.yml @@ -8,6 +8,7 @@ services: cvat_server: *allow-minio cvat_worker_export: *allow-minio cvat_worker_import: *allow-minio + cvat_worker_chunks: *allow-minio minio: image: quay.io/minio/minio:RELEASE.2022-09-17T00-09-45Z diff --git a/tests/python/README.md b/tests/python/README.md index 74373153085b..3a7b246c5508 100644 --- a/tests/python/README.md +++ b/tests/python/README.md @@ -20,13 +20,15 @@ the server calling REST API directly (as it done by users). ## How to run? **Initial steps** +1. 
On Debian/Ubuntu, make sure that your `$USER` is in `docker` group: + ```shell + sudo usermod -aG docker $USER + ``` 1. Follow [this guide](../../site/content/en/docs/api_sdk/sdk/developer-guide.md) to prepare `cvat-sdk` and `cvat-cli` source code 1. Install all necessary requirements before running REST API tests: - ``` + ```shell pip install -r ./tests/python/requirements.txt - pip install -e ./cvat-sdk - pip install -e ./cvat-cli ``` 1. Stop any other CVAT containers which you run previously. They keep ports which are used by containers for the testing system. diff --git a/tests/python/cli/cmtp_function.py b/tests/python/cli/cmtp_function.py new file mode 100644 index 000000000000..2ae5cb26f663 --- /dev/null +++ b/tests/python/cli/cmtp_function.py @@ -0,0 +1,22 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import cvat_sdk.auto_annotation as cvataa +import cvat_sdk.models as models +import PIL.Image + +spec = cvataa.DetectionFunctionSpec( + labels=[ + cvataa.label_spec("car", 0), + ], +) + + +def detect( + context: cvataa.DetectionFunctionContext, image: PIL.Image.Image +) -> list[models.LabeledShapeRequest]: + if context.conv_mask_to_poly: + return [cvataa.polygon(0, [0, 0, 0, 1, 1, 1])] + else: + return [cvataa.mask(0, [1, 0, 0, 0, 0])] diff --git a/tests/python/cli/conf_threshold_function.py b/tests/python/cli/conf_threshold_function.py new file mode 100644 index 000000000000..bcb1add2d660 --- /dev/null +++ b/tests/python/cli/conf_threshold_function.py @@ -0,0 +1,21 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import cvat_sdk.auto_annotation as cvataa +import cvat_sdk.models as models +import PIL.Image + +spec = cvataa.DetectionFunctionSpec( + labels=[ + cvataa.label_spec("car", 0), + ], +) + + +def detect( + context: cvataa.DetectionFunctionContext, image: PIL.Image.Image +) -> list[models.LabeledShapeRequest]: + return [ + cvataa.rectangle(0, [context.conf_threshold, 1, 1, 1]), + ] diff --git a/tests/python/cli/conftest.py b/tests/python/cli/conftest.py index c36974b6ca5d..c7a8fe7da4db 100644 --- a/tests/python/cli/conftest.py +++ b/tests/python/cli/conftest.py @@ -2,4 +2,4 @@ # # SPDX-License-Identifier: MIT -from sdk.fixtures import fxt_client # pylint: disable=unused-import +from sdk.fixtures import * # pylint: disable=unused-import diff --git a/tests/python/cli/example_function.py b/tests/python/cli/example_function.py index 4b1b41857825..57d67a5b40a2 100644 --- a/tests/python/cli/example_function.py +++ b/tests/python/cli/example_function.py @@ -2,8 +2,6 @@ # # SPDX-License-Identifier: MIT -from typing import List - import cvat_sdk.auto_annotation as cvataa import cvat_sdk.models as models import PIL.Image @@ -17,7 +15,7 @@ def detect( context: cvataa.DetectionFunctionContext, image: PIL.Image.Image -) -> List[models.LabeledShapeRequest]: +) -> list[models.LabeledShapeRequest]: return [ cvataa.rectangle(0, [1, 2, 3, 4]), ] diff --git a/tests/python/cli/example_parameterized_function.py b/tests/python/cli/example_parameterized_function.py index 29d9038e78b4..e46d40867a4c 100644 --- a/tests/python/cli/example_parameterized_function.py +++ b/tests/python/cli/example_parameterized_function.py @@ -3,7 +3,6 @@ # SPDX-License-Identifier: MIT from types import SimpleNamespace as namespace -from typing import List import cvat_sdk.auto_annotation as cvataa import cvat_sdk.models as models @@ -24,7 +23,7 @@ def create(s: str, i: int, f: float, b: bool) -> cvataa.DetectionFunction: def detect( context: 
cvataa.DetectionFunctionContext, image: PIL.Image.Image - ) -> List[models.LabeledShapeRequest]: + ) -> list[models.LabeledShapeRequest]: return [ cvataa.rectangle(0, [1, 2, 3, 4]), ] diff --git a/tests/python/cli/self-signed.crt b/tests/python/cli/self-signed.crt new file mode 100644 index 000000000000..815373bf286c --- /dev/null +++ b/tests/python/cli/self-signed.crt @@ -0,0 +1,9 @@ +-----BEGIN CERTIFICATE----- +MIIBPDCB76ADAgECAhQksQwFGcyVwF0+gIOPMPBB+/NjNTAFBgMrZXAwFDESMBAG +A1UEAwwJbG9jYWxob3N0MB4XDTI0MTAyODEyMTkyNFoXDTI0MTAyOTEyMTkyNFow +FDESMBAGA1UEAwwJbG9jYWxob3N0MCowBQYDK2VwAyEAzGOv96vkrHr0GPcWL7vN +8mgR4XMg9ItNpJ2nbMmjYCKjUzBRMB0GA1UdDgQWBBR6Hn0aG/ZGAJjY9HIUK7El +84qAgzAfBgNVHSMEGDAWgBR6Hn0aG/ZGAJjY9HIUK7El84qAgzAPBgNVHRMBAf8E +BTADAQH/MAUGAytlcANBAMj2zWdIa8oOiEtUWFMv+KYf1kyP1lUnlcC2xUpOj8d3 +kRYtlRX4E7F5zzzgKgNpbanRAg72qnqPiFAFCGVAhgY= +-----END CERTIFICATE----- diff --git a/tests/python/cli/self-signed.key b/tests/python/cli/self-signed.key new file mode 100644 index 000000000000..f81d8519bcac --- /dev/null +++ b/tests/python/cli/self-signed.key @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEIKe5zj/UrVJ/LySjKm9BBVHXziqFIwJ6w+HuTHnldCLo +-----END PRIVATE KEY----- diff --git a/tests/python/cli/test_cli_misc.py b/tests/python/cli/test_cli_misc.py new file mode 100644 index 000000000000..ea4a3f380430 --- /dev/null +++ b/tests/python/cli/test_cli_misc.py @@ -0,0 +1,94 @@ +# Copyright (C) 2022-2023 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import json +import os + +import packaging.version as pv +import pytest +from cvat_sdk import Client +from cvat_sdk.api_client import models +from cvat_sdk.core.proxies.tasks import ResourceType + +from .util import TestCliBase, generate_images, https_reverse_proxy, run_cli + + +class TestCliMisc(TestCliBase): + def test_can_warn_on_mismatching_server_version(self, monkeypatch, caplog): + def mocked_version(_): + return pv.Version("0") + + # We don't actually run a separate process in the tests here, so it works + monkeypatch.setattr(Client, "get_server_version", mocked_version) + + self.run_cli("task", "ls") + + assert "Server version '0' is not compatible with SDK version" in caplog.text + + @pytest.mark.parametrize("verify", [True, False]) + def test_can_control_ssl_verification_with_arg(self, verify: bool): + with https_reverse_proxy() as proxy_url: + if verify: + insecure_args = [] + else: + insecure_args = ["--insecure"] + + run_cli( + self, + f"--auth={self.user}:{self.password}", + f"--server-host={proxy_url}", + *insecure_args, + "task", + "ls", + expected_code=1 if verify else 0, + ) + stdout = self.stdout.getvalue() + + if not verify: + for line in stdout.splitlines(): + int(line) + + def test_can_control_organization_context(self): + org = "cli-test-org" + self.client.organizations.create(models.OrganizationWriteRequest(org)) + + files = generate_images(self.tmp_path, 1) + + stdout = self.run_cli( + "task", + "create", + "personal_task", + ResourceType.LOCAL.name, + *map(os.fspath, files), + "--labels=" + json.dumps([{"name": "person"}]), + "--completion_verification_period=0.01", + organization="", + ) + + personal_task_id = int(stdout.split()[-1]) + + stdout = self.run_cli( + "task", + "create", + "org_task", + ResourceType.LOCAL.name, + *map(os.fspath, files), + "--labels=" + json.dumps([{"name": "person"}]), + "--completion_verification_period=0.01", + organization=org, + ) + + org_task_id = int(stdout.split()[-1]) + + personal_task_ids = list(map(int, self.run_cli("task", "ls", 
organization="").split())) + assert personal_task_id in personal_task_ids + assert org_task_id not in personal_task_ids + + org_task_ids = list(map(int, self.run_cli("task", "ls", organization=org).split())) + assert personal_task_id not in org_task_ids + assert org_task_id in org_task_ids + + all_task_ids = list(map(int, self.run_cli("task", "ls").split())) + assert personal_task_id in all_task_ids + assert org_task_id in all_task_ids diff --git a/tests/python/cli/test_cli_projects.py b/tests/python/cli/test_cli_projects.py new file mode 100644 index 000000000000..032085b52d56 --- /dev/null +++ b/tests/python/cli/test_cli_projects.py @@ -0,0 +1,77 @@ +# Copyright (C) 2022-2023 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import json +import os + +import pytest +from cvat_sdk.api_client import exceptions +from cvat_sdk.core.proxies.projects import Project + +from .util import TestCliBase + + +class TestCliProjects(TestCliBase): + @pytest.fixture + def fxt_new_project(self): + project = self.client.projects.create( + spec={ + "name": "test_project", + "labels": [{"name": "car"}, {"name": "person"}], + }, + ) + + return project + + def test_can_create_project(self): + stdout = self.run_cli( + "project", + "create", + "new_project", + "--labels", + json.dumps([{"name": "car"}, {"name": "person"}]), + "--bug_tracker", + "https://bugs.example/", + ) + + project_id = int(stdout.rstrip("\n")) + created_project = self.client.projects.retrieve(project_id) + assert created_project.name == "new_project" + assert created_project.bug_tracker == "https://bugs.example/" + assert {label.name for label in created_project.get_labels()} == {"car", "person"} + + def test_can_create_project_from_dataset(self, fxt_coco_dataset): + stdout = self.run_cli( + "project", + "create", + "new_project", + "--dataset_path", + os.fspath(fxt_coco_dataset), + "--dataset_format", + "COCO 1.0", + ) + + project_id = int(stdout.rstrip("\n")) + created_project = self.client.projects.retrieve(project_id) + assert created_project.name == "new_project" + assert {label.name for label in created_project.get_labels()} == {"car", "person"} + assert created_project.tasks.count == 1 + + def test_can_list_projects_in_simple_format(self, fxt_new_project: Project): + output = self.run_cli("project", "ls") + + results = output.split("\n") + assert any(str(fxt_new_project.id) in r for r in results) + + def test_can_list_project_in_json_format(self, fxt_new_project: Project): + output = self.run_cli("project", "ls", "--json") + + results = json.loads(output) + assert any(r["id"] == fxt_new_project.id for r in results) + + def test_can_delete_project(self, fxt_new_project: Project): + self.run_cli("project", "delete", str(fxt_new_project.id)) + + with pytest.raises(exceptions.NotFoundException): + fxt_new_project.fetch() diff --git a/tests/python/cli/test_cli.py b/tests/python/cli/test_cli_tasks.py similarity index 60% rename from tests/python/cli/test_cli.py rename to tests/python/cli/test_cli_tasks.py index 364c7011e7ca..d0af410a7c99 100644 --- a/tests/python/cli/test_cli.py +++ b/tests/python/cli/test_cli_tasks.py @@ -2,48 +2,22 @@ # # SPDX-License-Identifier: MIT -import io import json import os from pathlib import Path -from typing import Optional -import packaging.version as pv import pytest -from cvat_cli.cli import CLI -from cvat_sdk import Client, make_client -from cvat_sdk.api_client import exceptions, models +from cvat_sdk.api_client import exceptions from cvat_sdk.core.proxies.tasks import ResourceType, Task from PIL 
import Image from sdk.util import generate_coco_json -from shared.utils.config import BASE_URL, USER_PASS from shared.utils.helpers import generate_image_file -from .util import generate_images, run_cli - - -class TestCLI: - @pytest.fixture(autouse=True) - def setup( - self, - restore_db_per_function, # force fixture call order to allow DB setup - fxt_stdout: io.StringIO, - tmp_path: Path, - admin_user: str, - ): - self.tmp_path = tmp_path - self.stdout = fxt_stdout - self.host, self.port = BASE_URL.rsplit(":", maxsplit=1) - self.user = admin_user - self.password = USER_PASS - self.client = make_client( - host=self.host, port=self.port, credentials=(self.user, self.password) - ) - self.client.config.status_check_period = 0.01 +from .util import TestCliBase, generate_images - yield +class TestCliTasks(TestCliBase): @pytest.fixture def fxt_image_file(self): img_path = self.tmp_path / "img_0.png" @@ -85,31 +59,11 @@ def fxt_new_task(self): return task - def run_cli( - self, cmd: str, *args: str, expected_code: int = 0, organization: Optional[str] = None - ) -> str: - common_args = [ - f"--auth={self.user}:{self.password}", - f"--server-host={self.host}", - f"--server-port={self.port}", - ] - - if organization is not None: - common_args.append(f"--organization={organization}") - - run_cli( - self, - *common_args, - cmd, - *args, - expected_code=expected_code, - ) - return self.stdout.getvalue() - def test_can_create_task_from_local_images(self): files = generate_images(self.tmp_path, 5) stdout = self.run_cli( + "task", "create", "test_task", ResourceType.LOCAL.name, @@ -120,7 +74,7 @@ def test_can_create_task_from_local_images(self): "0.01", ) - task_id = int(stdout.split()[-1]) + task_id = int(stdout.rstrip("\n")) assert self.client.tasks.retrieve(task_id).size == 5 def test_can_create_task_from_local_images_with_parameters(self): @@ -131,6 +85,7 @@ def test_can_create_task_from_local_images_with_parameters(self): frame_step = 3 stdout = self.run_cli( + "task", "create", "test_task", ResourceType.LOCAL.name, @@ -147,7 +102,7 @@ def test_can_create_task_from_local_images_with_parameters(self): "http://localhost/bug", ) - task_id = int(stdout.split()[-1]) + task_id = int(stdout.rstrip("\n")) task = self.client.tasks.retrieve(task_id) frames = task.get_frames_info() assert [f.name for f in frames] == [ @@ -157,19 +112,19 @@ def test_can_create_task_from_local_images_with_parameters(self): assert task.bug_tracker == "http://localhost/bug" def test_can_list_tasks_in_simple_format(self, fxt_new_task: Task): - output = self.run_cli("ls") + output = self.run_cli("task", "ls") results = output.split("\n") assert any(str(fxt_new_task.id) in r for r in results) def test_can_list_tasks_in_json_format(self, fxt_new_task: Task): - output = self.run_cli("ls", "--json") + output = self.run_cli("task", "ls", "--json") results = json.loads(output) assert any(r["id"] == fxt_new_task.id for r in results) def test_can_delete_task(self, fxt_new_task: Task): - self.run_cli("delete", str(fxt_new_task.id)) + self.run_cli("task", "delete", str(fxt_new_task.id)) with pytest.raises(exceptions.NotFoundException): fxt_new_task.fetch() @@ -177,7 +132,8 @@ def test_can_delete_task(self, fxt_new_task: Task): def test_can_download_task_annotations(self, fxt_new_task: Task): filename = self.tmp_path / "task_{fxt_new_task.id}-cvat.zip" self.run_cli( - "dump", + "task", + "export-dataset", str(fxt_new_task.id), str(filename), "--format", @@ -193,7 +149,8 @@ def test_can_download_task_annotations(self, fxt_new_task: Task): def 
test_can_download_task_backup(self, fxt_new_task: Task): filename = self.tmp_path / "task_{fxt_new_task.id}-cvat.zip" self.run_cli( - "export", + "task", + "backup", str(fxt_new_task.id), str(filename), "--completion_verification_period", @@ -206,6 +163,7 @@ def test_can_download_task_backup(self, fxt_new_task: Task): def test_can_download_task_frames(self, fxt_new_task: Task, quality: str): out_dir = str(self.tmp_path / "downloads") self.run_cli( + "task", "frames", str(fxt_new_task.id), "0", @@ -221,93 +179,29 @@ def test_can_download_task_frames(self, fxt_new_task: Task, quality: str): } def test_can_upload_annotations(self, fxt_new_task: Task, fxt_coco_file: Path): - self.run_cli("upload", str(fxt_new_task.id), str(fxt_coco_file), "--format", "COCO 1.0") + self.run_cli( + "task", + "import-dataset", + str(fxt_new_task.id), + str(fxt_coco_file), + "--format", + "COCO 1.0", + ) def test_can_create_from_backup(self, fxt_new_task: Task, fxt_backup_file: Path): - stdout = self.run_cli("import", str(fxt_backup_file)) + stdout = self.run_cli("task", "create-from-backup", str(fxt_backup_file)) - task_id = int(stdout.split()[-1]) + task_id = int(stdout.rstrip("\n")) assert task_id assert task_id != fxt_new_task.id assert self.client.tasks.retrieve(task_id).size == fxt_new_task.size - def test_can_warn_on_mismatching_server_version(self, monkeypatch, caplog): - def mocked_version(_): - return pv.Version("0") - - # We don't actually run a separate process in the tests here, so it works - monkeypatch.setattr(Client, "get_server_version", mocked_version) - - self.run_cli("ls") - - assert "Server version '0' is not compatible with SDK version" in caplog.text - - @pytest.mark.parametrize("verify", [True, False]) - def test_can_control_ssl_verification_with_arg(self, monkeypatch, verify: bool): - # TODO: Very hacky implementation, improve it, if possible - class MyException(Exception): - pass - - normal_init = CLI.__init__ - - def my_init(self, *args, **kwargs): - normal_init(self, *args, **kwargs) - raise MyException(self.client.api_client.configuration.verify_ssl) - - monkeypatch.setattr(CLI, "__init__", my_init) - - with pytest.raises(MyException) as capture: - self.run_cli(*(["--insecure"] if not verify else []), "ls") - - assert capture.value.args[0] == verify - - def test_can_control_organization_context(self): - org = "cli-test-org" - self.client.organizations.create(models.OrganizationWriteRequest(org)) - - files = generate_images(self.tmp_path, 1) - - stdout = self.run_cli( - "create", - "personal_task", - ResourceType.LOCAL.name, - *map(os.fspath, files), - "--labels=" + json.dumps([{"name": "person"}]), - "--completion_verification_period=0.01", - organization="", - ) - - personal_task_id = int(stdout.split()[-1]) - - stdout = self.run_cli( - "create", - "org_task", - ResourceType.LOCAL.name, - *map(os.fspath, files), - "--labels=" + json.dumps([{"name": "person"}]), - "--completion_verification_period=0.01", - organization=org, - ) - - org_task_id = int(stdout.split()[-1]) - - personal_task_ids = list(map(int, self.run_cli("ls", organization="").split())) - assert personal_task_id in personal_task_ids - assert org_task_id not in personal_task_ids - - org_task_ids = list(map(int, self.run_cli("ls", organization=org).split())) - assert personal_task_id not in org_task_ids - assert org_task_id in org_task_ids - - all_task_ids = list(map(int, self.run_cli("ls").split())) - assert personal_task_id in all_task_ids - assert org_task_id in all_task_ids - def test_auto_annotate_with_module(self, 
fxt_new_task: Task): annotations = fxt_new_task.get_annotations() assert not annotations.shapes self.run_cli( + "task", "auto-annotate", str(fxt_new_task.id), f"--function-module={__package__}.example_function", @@ -321,6 +215,7 @@ def test_auto_annotate_with_file(self, fxt_new_task: Task): assert not annotations.shapes self.run_cli( + "task", "auto-annotate", str(fxt_new_task.id), f"--function-file={Path(__file__).with_name('example_function.py')}", @@ -334,6 +229,7 @@ def test_auto_annotate_with_parameters(self, fxt_new_task: Task): assert not annotations.shapes self.run_cli( + "task", "auto-annotate", str(fxt_new_task.id), f"--function-module={__package__}.example_parameterized_function", @@ -345,3 +241,50 @@ def test_auto_annotate_with_parameters(self, fxt_new_task: Task): annotations = fxt_new_task.get_annotations() assert annotations.shapes + + def test_auto_annotate_with_threshold(self, fxt_new_task: Task): + annotations = fxt_new_task.get_annotations() + assert not annotations.shapes + + self.run_cli( + "task", + "auto-annotate", + str(fxt_new_task.id), + f"--function-module={__package__}.conf_threshold_function", + "--conf-threshold=0.75", + ) + + annotations = fxt_new_task.get_annotations() + assert annotations.shapes[0].points[0] == 0.75 # python:S1244 NOSONAR + + def test_auto_annotate_with_cmtp(self, fxt_new_task: Task): + self.run_cli( + "task", + "auto-annotate", + str(fxt_new_task.id), + f"--function-module={__package__}.cmtp_function", + "--clear-existing", + ) + + annotations = fxt_new_task.get_annotations() + assert annotations.shapes[0].type.value == "mask" + + self.run_cli( + "task", + "auto-annotate", + str(fxt_new_task.id), + f"--function-module={__package__}.cmtp_function", + "--clear-existing", + "--conv-mask-to-poly", + ) + + annotations = fxt_new_task.get_annotations() + assert annotations.shapes[0].type.value == "polygon" + + def test_legacy_alias(self, caplog): + # All legacy aliases are implemented the same way; + # no need to test every single one. 
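The `cmtp_function` and `conf_threshold_function` modules added earlier each exercise a single `DetectionFunctionContext` field in isolation. As a hedged sketch (not part of this PR), a realistic auto-annotation function would consult both fields together; the raw detections, confidence values, and label ids below are illustrative:

```python
import cvat_sdk.auto_annotation as cvataa
import cvat_sdk.models as models
import PIL.Image

spec = cvataa.DetectionFunctionSpec(
    labels=[
        cvataa.label_spec("car", 0),
    ],
)


def detect(
    context: cvataa.DetectionFunctionContext, image: PIL.Image.Image
) -> list[models.LabeledShapeRequest]:
    # Hypothetical raw model output: (confidence, mask in CVAT's RLE encoding,
    # polygon points) triples; a real function would run a model on `image`.
    raw_detections = [(0.9, [1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1])]

    results = []
    for confidence, rle_mask, polygon_points in raw_detections:
        # Honor the value passed via `--conf-threshold`, if one was given.
        if context.conf_threshold is not None and confidence < context.conf_threshold:
            continue

        if context.conv_mask_to_poly:
            # The caller passed `--conv-mask-to-poly`, so masks are not accepted.
            results.append(cvataa.polygon(0, polygon_points))
        else:
            results.append(cvataa.mask(0, rle_mask))

    return results
```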
+ self.run_cli("ls") + + assert "deprecated" in caplog.text + assert "task ls" in caplog.text diff --git a/tests/python/cli/util.py b/tests/python/cli/util.py index 034d5d073ace..0a0093475171 100644 --- a/tests/python/cli/util.py +++ b/tests/python/cli/util.py @@ -3,10 +3,21 @@ # SPDX-License-Identifier: MIT +import contextlib +import http.server +import io +import ssl +import threading import unittest +from collections.abc import Generator from pathlib import Path -from typing import Any, List, Union +from typing import Any, Optional, Union +import pytest +import requests +from cvat_sdk import make_client + +from shared.utils.config import BASE_URL, USER_PASS from shared.utils.helpers import generate_image_file @@ -21,7 +32,7 @@ def run_cli(test: Union[unittest.TestCase, Any], *args: str, expected_code: int assert expected_code == main(args) -def generate_images(dst_dir: Path, count: int) -> List[Path]: +def generate_images(dst_dir: Path, count: int) -> list[Path]: filenames = [] dst_dir.mkdir(parents=True, exist_ok=True) for i in range(count): @@ -29,3 +40,95 @@ def generate_images(dst_dir: Path, count: int) -> List[Path]: filename.write_bytes(generate_image_file(filename.name).getvalue()) filenames.append(filename) return filenames + + +@contextlib.contextmanager +def https_reverse_proxy() -> Generator[str, None, None]: + ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) + ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2 + cert_dir = Path(__file__).parent + ssl_context.load_cert_chain(cert_dir / "self-signed.crt", cert_dir / "self-signed.key") + + with http.server.HTTPServer(("localhost", 0), _ProxyHttpRequestHandler) as proxy_server: + proxy_server.socket = ssl_context.wrap_socket( + proxy_server.socket, + server_side=True, + ) + server_thread = threading.Thread(target=proxy_server.serve_forever) + server_thread.start() + try: + yield f"https://localhost:{proxy_server.server_port}" + finally: + proxy_server.shutdown() + server_thread.join() + + +class _ProxyHttpRequestHandler(http.server.BaseHTTPRequestHandler): + def do_GET(self): + response = requests.get(**self._shared_request_args()) + self._translate_response(response) + + def do_POST(self): + body_length = int(self.headers["Content-Length"]) + + response = requests.post(data=self.rfile.read(body_length), **self._shared_request_args()) + self._translate_response(response) + + def _shared_request_args(self) -> dict[str, Any]: + headers = {k.lower(): v for k, v in self.headers.items()} + del headers["host"] + + return {"url": BASE_URL + self.path, "headers": headers, "timeout": 60, "stream": True} + + def _translate_response(self, response: requests.Response) -> None: + self.send_response(response.status_code) + for key, value in response.headers.items(): + self.send_header(key, value) + self.end_headers() + # Need to use raw here to prevent requests from handling Content-Encoding. 
+ self.wfile.write(response.raw.read()) + + +class TestCliBase: + @pytest.fixture(autouse=True) + def setup( + self, + restore_db_per_function, # force fixture call order to allow DB setup + restore_redis_inmem_per_function, + restore_redis_ondisk_per_function, + fxt_stdout: io.StringIO, + tmp_path: Path, + admin_user: str, + ): + self.tmp_path = tmp_path + self.stdout = fxt_stdout + self.host, self.port = BASE_URL.rsplit(":", maxsplit=1) + self.user = admin_user + self.password = USER_PASS + self.client = make_client( + host=self.host, port=self.port, credentials=(self.user, self.password) + ) + self.client.config.status_check_period = 0.01 + + yield + + def run_cli( + self, cmd: str, *args: str, expected_code: int = 0, organization: Optional[str] = None + ) -> str: + common_args = [ + f"--auth={self.user}:{self.password}", + f"--server-host={self.host}", + f"--server-port={self.port}", + ] + + if organization is not None: + common_args.append(f"--organization={organization}") + + run_cli( + self, + *common_args, + cmd, + *args, + expected_code=expected_code, + ) + return self.stdout.getvalue() diff --git a/tests/python/requirements.txt b/tests/python/requirements.txt index 6ef44c0f5edb..d43d9b61d5df 100644 --- a/tests/python/requirements.txt +++ b/tests/python/requirements.txt @@ -4,9 +4,11 @@ pytest-cases==3.6.13 pytest-timeout==2.1.0 pytest-cov==4.1.0 requests==2.32.2 -deepdiff==5.6.0 +deepdiff==7.0.1 boto3==1.17.61 Pillow==10.3.0 python-dateutil==2.8.2 pyyaml==6.0.0 -numpy==1.22.0 \ No newline at end of file +numpy==2.0.0 + +# TODO: update pytest to 7.0.0 and pytest-timeout to 2.3.1 (better debug in vscode) \ No newline at end of file diff --git a/tests/python/rest_api/test_analytics.py b/tests/python/rest_api/test_analytics.py index 7ac3004e63ff..68671889a21c 100644 --- a/tests/python/rest_api/test_analytics.py +++ b/tests/python/rest_api/test_analytics.py @@ -35,11 +35,17 @@ def _test_cannot_see(self, user): assert response.status_code == HTTPStatus.FORBIDDEN @pytest.mark.parametrize( - "privilege, is_allow", - [("admin", True), ("business", True), ("worker", False), ("user", False)], + "conditions, is_allow", + [ + (dict(privilege="admin"), True), + (dict(privilege="worker", has_analytics_access=False), False), + (dict(privilege="worker", has_analytics_access=True), True), + (dict(privilege="user", has_analytics_access=False), False), + (dict(privilege="user", has_analytics_access=True), True), + ], ) - def test_can_see(self, privilege, is_allow, find_users): - user = find_users(privilege=privilege)[0]["username"] + def test_can_see(self, conditions, is_allow, find_users): + user = find_users(**conditions)[0]["username"] if is_allow: self._test_can_see(user) diff --git a/tests/python/rest_api/test_analytics_reports.py b/tests/python/rest_api/test_analytics_reports.py index a50c053fc138..bb48b19e2dd5 100644 --- a/tests/python/rest_api/test_analytics_reports.py +++ b/tests/python/rest_api/test_analytics_reports.py @@ -4,7 +4,7 @@ import json from http import HTTPStatus -from typing import Any, Dict, Optional +from typing import Any, Optional import pytest from cvat_sdk.api_client import models @@ -67,7 +67,7 @@ def _test_get_report_200( job_id: Optional[int] = None, task_id: Optional[int] = None, project_id: Optional[int] = None, - expected_data: Optional[Dict[str, Any]] = None, + expected_data: Optional[dict[str, Any]] = None, **kwargs, ): params = self._get_query_params(job_id=job_id, task_id=task_id, project_id=project_id) diff --git a/tests/python/rest_api/test_cloud_storages.py 
b/tests/python/rest_api/test_cloud_storages.py index 9fc1739b9e0f..ce2db93cab56 100644 --- a/tests/python/rest_api/test_cloud_storages.py +++ b/tests/python/rest_api/test_cloud_storages.py @@ -58,7 +58,6 @@ def _test_cannot_see(self, user, storage_id): "group, is_owner, is_allow", [ ("admin", False, True), - ("business", False, False), ("user", True, True), ], ) @@ -302,7 +301,6 @@ def _test_cannot_update(self, user, storage_id, spec): "group, is_owner, is_allow", [ ("admin", False, True), - ("business", False, False), ("worker", True, True), ], ) @@ -387,7 +385,6 @@ def _test_cannot_see(self, user, storage_id): "group, is_owner, is_allow", [ ("admin", False, True), - ("business", False, False), ("user", True, True), ], ) diff --git a/tests/python/rest_api/test_issues.py b/tests/python/rest_api/test_issues.py index c6c043f2e449..f1cbfdafacd2 100644 --- a/tests/python/rest_api/test_issues.py +++ b/tests/python/rest_api/test_issues.py @@ -6,7 +6,7 @@ import json from copy import deepcopy from http import HTTPStatus -from typing import Any, Dict, List, Tuple +from typing import Any import pytest from cvat_sdk import models @@ -55,8 +55,6 @@ def _test_check_response(self, user, data, is_allow, **kwargs): [ ("admin", True, True), ("admin", False, True), - ("business", True, True), - ("business", False, False), ("worker", True, True), ("worker", False, False), ("user", True, True), @@ -185,8 +183,6 @@ def get_data(issue_id, *, username: str = None): [ ("admin", True, None, True), ("admin", False, None, True), - ("business", True, None, True), - ("business", False, None, False), ("user", True, None, True), ("user", False, None, False), ("worker", False, True, True), @@ -275,8 +271,6 @@ def _test_check_response(self, user, issue_id, expect_success, **kwargs): [ ("admin", True, None, True), ("admin", False, None, True), - ("business", True, None, True), - ("business", False, None, False), ("user", True, None, True), ("user", False, None, False), ("worker", False, True, True), @@ -373,7 +367,7 @@ def setup(self, restore_db_per_class, admin_user, comments, issues): def _get_endpoint(self, api_client: ApiClient) -> Endpoint: return api_client.comments_api.list_endpoint - def _get_field_samples(self, field: str) -> Tuple[Any, List[Dict[str, Any]]]: + def _get_field_samples(self, field: str) -> tuple[Any, list[dict[str, Any]]]: if field == "job_id": issue_id, issue_comments = super()._get_field_samples("issue_id") issue = next((s for s in self.sample_issues if s["id"] == issue_id)) diff --git a/tests/python/rest_api/test_jobs.py b/tests/python/rest_api/test_jobs.py index 6d5626fcda99..e7b405dce9e9 100644 --- a/tests/python/rest_api/test_jobs.py +++ b/tests/python/rest_api/test_jobs.py @@ -15,7 +15,7 @@ from http import HTTPStatus from io import BytesIO from itertools import groupby, product -from typing import Any, Dict, List, Optional, Set, Tuple, Union +from typing import Any, Optional, Union import numpy as np import pytest @@ -71,7 +71,7 @@ def filter_jobs(jobs, tasks, org): @pytest.mark.usefixtures("restore_db_per_function") class TestPostJobs: - def _test_create_job_ok(self, user: str, data: Dict[str, Any], **kwargs): + def _test_create_job_ok(self, user: str, data: dict[str, Any], **kwargs): with make_api_client(user) as api_client: (_, response) = api_client.jobs_api.create( models.JobWriteRequest(**deepcopy(data)), **kwargs @@ -80,7 +80,7 @@ def _test_create_job_ok(self, user: str, data: Dict[str, Any], **kwargs): return response def _test_create_job_fails( - self, user: str, data: Dict[str, 
Any], *, expected_status: int, **kwargs + self, user: str, data: dict[str, Any], *, expected_status: int, **kwargs ): with make_api_client(user) as api_client: (_, response) = api_client.jobs_api.create( @@ -110,7 +110,7 @@ def test_can_create_gt_job_in_a_task( tasks, task_mode: str, frame_selection_method: str, - method_params: Set[str], + method_params: set[str], ): required_task_size = 15 @@ -544,7 +544,7 @@ def test_destroy_gt_job_in_org_task( @pytest.mark.usefixtures("restore_db_per_class") class TestGetJobs: def _test_get_job_200( - self, user, jid, *, expected_data: Optional[Dict[str, Any]] = None, **kwargs + self, user, jid, *, expected_data: Optional[dict[str, Any]] = None, **kwargs ): with make_api_client(user) as client: (_, response) = client.jobs_api.retrieve(jid, **kwargs) @@ -568,7 +568,7 @@ def test_admin_can_get_org_job(self, admin_user, jobs, tasks): job = next(job for job in jobs if tasks[job["task_id"]]["organization"] is not None) self._test_get_job_200(admin_user, job["id"], expected_data=job) - @pytest.mark.parametrize("groups", [["business"], ["user"]]) + @pytest.mark.parametrize("groups", [["user"]]) def test_non_admin_org_staff_can_get_job( self, groups, users, organizations, org_staff, jobs_by_org ): @@ -581,7 +581,7 @@ def test_non_admin_org_staff_can_get_job( job = jobs_by_org[org_id][0] self._test_get_job_200(user["username"], job["id"], expected_data=job) - @pytest.mark.parametrize("groups", [["business"], ["user"], ["worker"]]) + @pytest.mark.parametrize("groups", [["user"], ["worker"]]) def test_non_admin_job_staff_can_get_job(self, groups, users, jobs, is_job_staff): user, job = next( (user, job) @@ -591,7 +591,7 @@ def test_non_admin_job_staff_can_get_job(self, groups, users, jobs, is_job_staff ) self._test_get_job_200(user["username"], job["id"], expected_data=job) - @pytest.mark.parametrize("groups", [["business"], ["user"], ["worker"]]) + @pytest.mark.parametrize("groups", [["user"], ["worker"]]) def test_non_admin_non_job_staff_non_org_staff_cannot_get_job( self, groups, users, organizations, org_staff, jobs, is_job_staff ): @@ -691,6 +691,7 @@ def test_get_gt_job_in_org_task( @pytest.mark.usefixtures("restore_db_per_class") @pytest.mark.usefixtures("restore_redis_ondisk_per_class") +@pytest.mark.usefixtures("restore_redis_inmem_per_class") class TestGetGtJobData: def _delete_gt_job(self, user, gt_job_id): with make_api_client(user) as api_client: @@ -955,7 +956,7 @@ def test_admin_list_jobs(self, jobs, tasks, org): self._test_list_jobs_200("admin1", jobs, **kwargs) @pytest.mark.parametrize("org_id", ["", None, 1, 2]) - @pytest.mark.parametrize("groups", [["business"], ["user"], ["worker"], []]) + @pytest.mark.parametrize("groups", [["user"], ["worker"], []]) def test_non_admin_list_jobs( self, org_id, groups, users, jobs, tasks, projects, org_staff, is_org_member ): @@ -1024,8 +1025,6 @@ def _test_get_job_annotations_403(self, user, jid): [ (["admin"], True, True), (["admin"], False, True), - (["business"], True, True), - (["business"], False, False), (["worker"], True, True), (["worker"], False, False), (["user"], True, True), @@ -1093,7 +1092,7 @@ def test_member_get_job_annotations( @pytest.mark.parametrize("org", [1]) @pytest.mark.parametrize( "privilege, expect_success", - [("admin", True), ("business", False), ("worker", False), ("user", False)], + [("admin", True), ("worker", False), ("user", False)], ) def test_non_member_get_job_annotations( self, @@ -1191,7 +1190,7 @@ def test_member_update_job_annotations( @pytest.mark.parametrize("org", 
[2]) @pytest.mark.parametrize( "privilege, expect_success", - [("admin", True), ("business", False), ("worker", False), ("user", False)], + [("admin", True), ("worker", False), ("user", False)], ) def test_non_member_update_job_annotations( self, @@ -1218,8 +1217,6 @@ def test_non_member_update_job_annotations( [ ("admin", True, True), ("admin", False, True), - ("business", True, True), - ("business", False, False), ("worker", True, True), ("worker", False, False), ("user", True, True), @@ -1446,7 +1443,7 @@ def _test_export_dataset( username: str, jid: int, *, - api_version: Union[int, Tuple[int]], + api_version: Union[int, tuple[int]], local_download: bool = True, **kwargs, ) -> Optional[bytes]: @@ -1477,9 +1474,9 @@ def _test_export_annotations( def test_can_export_dataset_locally_and_to_cloud_with_both_api_versions( self, admin_user: str, - jobs_with_shapes: List, + jobs_with_shapes: list, filter_tasks, - api_version: Tuple[int], + api_version: tuple[int], local_download: bool, ): filter_ = "target_storage__location" @@ -1651,15 +1648,6 @@ def test_admin_get_org_job_preview(self, jobs, tasks): job_id = next(job["id"] for job in jobs if tasks[job["task_id"]]["organization"]) self._test_get_job_preview_200("admin2", job_id) - def test_business_can_get_job_preview_in_sandbox(self, find_users, jobs, is_job_staff): - username, job_id = next( - (user["username"], job["id"]) - for user in find_users(privilege="business") - for job in jobs - if is_job_staff(user["id"], job["id"]) - ) - self._test_get_job_preview_200(username, job_id) - def test_user_can_get_job_preview_in_sandbox(self, find_users, jobs, is_job_staff): username, job_id = next( (user["username"], job["id"]) @@ -1669,15 +1657,6 @@ def test_user_can_get_job_preview_in_sandbox(self, find_users, jobs, is_job_staf ) self._test_get_job_preview_200(username, job_id) - def test_business_cannot_get_job_preview_in_sandbox(self, find_users, jobs, is_job_staff): - username, job_id = next( - (user["username"], job["id"]) - for user in find_users(privilege="business") - for job in jobs - if not is_job_staff(user["id"], job["id"]) - ) - self._test_get_job_preview_403(username, job_id) - def test_user_cannot_get_job_preview_in_sandbox(self, find_users, jobs, is_job_staff): username, job_id = next( (user["username"], job["id"]) diff --git a/tests/python/rest_api/test_labels.py b/tests/python/rest_api/test_labels.py index d64133e7dd36..00cfd6225647 100644 --- a/tests/python/rest_api/test_labels.py +++ b/tests/python/rest_api/test_labels.py @@ -7,7 +7,7 @@ from copy import deepcopy from http import HTTPStatus from types import SimpleNamespace -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Optional import pytest from cvat_sdk import exceptions, models @@ -60,7 +60,7 @@ def setup(self, _base_setup): """ @staticmethod - def _labels_by_source(labels: List[Dict], *, source_key: str) -> Dict[int, List[Dict]]: + def _labels_by_source(labels: list[dict], *, source_key: str) -> dict[int, list[dict]]: labels_by_source = {} for label in labels: label_source = label.get(source_key) @@ -216,7 +216,7 @@ def setup(self, restore_db_per_class, admin_user, labels, jobs_wlc, tasks_wlc, p def _get_endpoint(self, api_client: ApiClient) -> Endpoint: return api_client.labels_api.list_endpoint - def _get_field_samples(self, field: str) -> Tuple[Any, List[Dict[str, Any]]]: + def _get_field_samples(self, field: str) -> tuple[Any, list[dict[str, Any]]]: if field == "parent": parent_id, gt_objects = self._get_field_samples("parent_id") 
parent_name = self._get_field( @@ -584,8 +584,8 @@ def _test_update_denied(self, user, lid, data, expected_status=HTTPStatus.FORBID return response def _get_patch_data( - self, original_data: Dict[str, Any], **overrides - ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + self, original_data: dict[str, Any], **overrides + ) -> tuple[dict[str, Any], dict[str, Any]]: result = deepcopy(original_data) result.update(overrides) diff --git a/tests/python/rest_api/test_memberships.py b/tests/python/rest_api/test_memberships.py index e03cac2e2779..a25074af4890 100644 --- a/tests/python/rest_api/test_memberships.py +++ b/tests/python/rest_api/test_memberships.py @@ -4,7 +4,7 @@ # SPDX-License-Identifier: MIT from http import HTTPStatus -from typing import ClassVar, List +from typing import ClassVar import pytest from cvat_sdk.api_client.api_client import ApiClient, Endpoint @@ -40,7 +40,7 @@ def test_can_filter_by_org_id(self, field_value, query_value, memberships): ) def test_non_admin_can_see_only_self_memberships(self, memberships): - non_admins = ["business1", "user1", "dummy1", "worker2"] + non_admins = ["user1", "dummy1", "worker2"] for username in non_admins: data = [obj for obj in memberships if obj["user"]["username"] == username] self._test_can_see_memberships(username, data) @@ -80,7 +80,7 @@ def test_can_use_simple_filter_for_object_list(self, field): @pytest.mark.usefixtures("restore_db_per_function") class TestPatchMemberships: _ORG: ClassVar[int] = 1 - ROLES: ClassVar[List[str]] = ["worker", "supervisor", "maintainer", "owner"] + ROLES: ClassVar[list[str]] = ["worker", "supervisor", "maintainer", "owner"] def _test_can_change_membership(self, user, membership_id, new_role): response = patch_method( diff --git a/tests/python/rest_api/test_organizations.py b/tests/python/rest_api/test_organizations.py index 5daee9e53537..50834b1fff83 100644 --- a/tests/python/rest_api/test_organizations.py +++ b/tests/python/rest_api/test_organizations.py @@ -31,7 +31,6 @@ class TestMetadataOrganizations: [ ("admin", None, None), ("user", None, False), - ("business", None, False), ("worker", None, False), (None, "owner", True), (None, "maintainer", True), @@ -79,7 +78,6 @@ class TestGetOrganizations: [ ("admin", None, None, True), ("user", None, False, False), - ("business", None, False, False), ("worker", None, False, False), (None, "owner", True, True), (None, "maintainer", True, True), @@ -182,7 +180,6 @@ def expected_data(self, organizations, request_data): [ ("admin", None, None, True), ("user", None, False, False), - ("business", None, False, False), ("worker", None, False, False), (None, "owner", True, True), (None, "maintainer", True, True), @@ -239,7 +236,6 @@ class TestDeleteOrganizations: (None, "worker", True, False), (None, "supervisor", True, False), ("user", None, False, False), - ("business", None, False, False), ("worker", None, False, False), ], ) diff --git a/tests/python/rest_api/test_projects.py b/tests/python/rest_api/test_projects.py index b0c8a3b247c4..d3d807d68088 100644 --- a/tests/python/rest_api/test_projects.py +++ b/tests/python/rest_api/test_projects.py @@ -16,10 +16,10 @@ from itertools import product from operator import itemgetter from time import sleep -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union import pytest -from cvat_sdk.api_client import ApiClient, Configuration, models +from cvat_sdk.api_client import ApiClient, Configuration, exceptions, models from cvat_sdk.api_client.api_client import Endpoint from 
cvat_sdk.api_client.exceptions import ForbiddenException from cvat_sdk.core.helpers import get_paginated_collection @@ -34,8 +34,16 @@ patch_method, post_method, ) - -from .utils import CollectionSimpleFilterTestBase, export_project_backup, export_project_dataset +from shared.utils.helpers import generate_image_files + +from .utils import ( + DATUMARO_FORMAT_FOR_DIMENSION, + CollectionSimpleFilterTestBase, + create_task, + export_dataset, + export_project_backup, + export_project_dataset, +) @pytest.mark.usefixtures("restore_db_per_class") @@ -447,7 +455,7 @@ def test_if_worker_cannot_create_project(self, find_users): spec = {"name": f"test {username} tries to create a project"} self._test_create_project_403(username, spec) - @pytest.mark.parametrize("privilege", ("admin", "business", "user")) + @pytest.mark.parametrize("privilege", ("admin", "user")) def test_if_user_can_create_project(self, find_users, privilege): privileged_users = find_users(privilege=privilege) assert len(privileged_users) @@ -498,7 +506,7 @@ def _create_user(cls, api_client: ApiClient, email: str) -> str: return json.loads(response.data) @classmethod - def _create_org(cls, api_client: ApiClient, members: Optional[Dict[str, str]] = None) -> str: + def _create_org(cls, api_client: ApiClient, members: Optional[dict[str, str]] = None) -> str: with api_client: (_, response) = api_client.organizations_api.create( models.OrganizationWriteRequest(slug="test_org_roles"), _parse_response=False @@ -611,6 +619,7 @@ def _check_cvat_for_video_project_annotations_meta(content, values_to_be_checked @pytest.mark.usefixtures("restore_db_per_function") @pytest.mark.usefixtures("restore_redis_inmem_per_function") +@pytest.mark.usefixtures("restore_redis_ondisk_per_function") class TestImportExportDatasetProject: @pytest.fixture(autouse=True) @@ -622,7 +631,7 @@ def _test_export_dataset( username: str, pid: int, *, - api_version: Union[int, Tuple[int]], + api_version: Union[int, tuple[int]], local_download: bool = True, **kwargs, ) -> Optional[bytes]: @@ -771,7 +780,7 @@ def test_can_import_export_dataset_with_some_format(self, format_name: str, api_ "local_download", (True, pytest.param(False, marks=pytest.mark.with_external_services)) ) def test_can_export_dataset_locally_and_to_cloud_with_both_api_versions( - self, admin_user: str, filter_projects, api_version: Tuple[int], local_download: bool + self, admin_user: str, filter_projects, api_version: tuple[int], local_download: bool ): filter_ = "target_storage__location" if local_download: @@ -984,6 +993,68 @@ def test_can_export_and_import_dataset_after_deleting_related_storage( self._test_import_project(admin_user, project_id, "CVAT 1.1", import_data) + @pytest.mark.parametrize( + "dimension, format_name", + [ + *DATUMARO_FORMAT_FOR_DIMENSION.items(), + ("2d", "CVAT 1.1"), + ("3d", "CVAT 1.1"), + ("2d", "COCO 1.0"), + ], + ) + def test_cant_import_annotations_as_project(self, admin_user, tasks, format_name, dimension): + task = next(t for t in tasks if t.get("size") if t["dimension"] == dimension) + + def _export_task(task_id: int, format_name: str) -> io.BytesIO: + with make_api_client(admin_user) as api_client: + return io.BytesIO( + export_dataset( + api_client.tasks_api, + api_version=2, + id=task_id, + format=format_name, + save_images=False, + ) + ) + + if format_name in list(DATUMARO_FORMAT_FOR_DIMENSION.values()): + with zipfile.ZipFile(_export_task(task["id"], format_name)) as zip_file: + annotations = zip_file.read("annotations/default.json") + + dataset_file = 
io.BytesIO(annotations) + dataset_file.name = "annotations.json" + elif format_name == "CVAT 1.1": + with zipfile.ZipFile(_export_task(task["id"], "CVAT for images 1.1")) as zip_file: + annotations = zip_file.read("annotations.xml") + + dataset_file = io.BytesIO(annotations) + dataset_file.name = "annotations.xml" + elif format_name == "COCO 1.0": + with zipfile.ZipFile(_export_task(task["id"], format_name)) as zip_file: + annotations = zip_file.read("annotations/instances_default.json") + + dataset_file = io.BytesIO(annotations) + dataset_file.name = "annotations.json" + else: + assert False + + with make_api_client(admin_user) as api_client: + project, _ = api_client.projects_api.create( + project_write_request=models.ProjectWriteRequest( + name=f"test_annotations_import_as_project {format_name}" + ) + ) + + import_data = {"dataset_file": dataset_file} + + with pytest.raises(exceptions.ApiException, match="Dataset file should be zip archive"): + self._test_import_project( + admin_user, + project.id, + format_name=format_name, + data=import_data, + ) + @pytest.mark.parametrize( "export_format, subset_path_template", [ @@ -1038,10 +1109,62 @@ def test_creates_subfolders_for_subsets_on_export( len([f for f in zip_file.namelist() if f.startswith(folder_prefix)]) > 0 ), f"No {folder_prefix} in {zip_file.namelist()}" + def test_export_project_with_honeypots(self, admin_user: str): + project_spec = { + "name": "Project with honeypots", + "labels": [{"name": "cat"}], + } + + with make_api_client(admin_user) as api_client: + project, _ = api_client.projects_api.create(project_spec) + + image_files = generate_image_files(3) + image_names = [i.name for i in image_files] + + task_params = { + "name": "Task with honeypots", + "segment_size": 1, + "project_id": project.id, + } + + data_params = { + "image_quality": 70, + "client_files": image_files, + "sorting_method": "random", + "validation_params": { + "mode": "gt_pool", + "frame_selection_method": "manual", + "frames_per_job_count": 1, + "frames": [image_files[-1].name], + }, + } + + create_task(admin_user, spec=task_params, data=data_params) + + dataset = export_project_dataset( + admin_user, api_version=2, save_images=True, id=project.id, format="COCO 1.0" + ) + + with zipfile.ZipFile(io.BytesIO(dataset)) as zip_file: + subset_path = "images/default" + assert ( + sorted( + [ + f[len(subset_path) + 1 :] + for f in zip_file.namelist() + if f.startswith(subset_path) + ] + ) + == image_names + ) + with zip_file.open("annotations/instances_default.json") as anno_file: + annotations = json.load(anno_file) + assert sorted([a["file_name"] for a in annotations["images"]]) == image_names + @pytest.mark.usefixtures("restore_db_per_function") class TestPatchProjectLabel: - def _get_project_labels(self, pid, user, **kwargs) -> List[models.Label]: + def _get_project_labels(self, pid, user, **kwargs) -> list[models.Label]: kwargs.setdefault("return_json", True) with make_api_client(user) as api_client: return get_paginated_collection( diff --git a/tests/python/rest_api/test_quality_control.py b/tests/python/rest_api/test_quality_control.py index 1886a0a62ac1..d03675c9156e 100644 --- a/tests/python/rest_api/test_quality_control.py +++ b/tests/python/rest_api/test_quality_control.py @@ -3,11 +3,12 @@ # SPDX-License-Identifier: MIT import json +from collections.abc import Iterable from copy import deepcopy from functools import partial from http import HTTPStatus from itertools import groupby -from typing import Any, Callable, Dict, Iterable, List, Optional, 
Tuple +from typing import Any, Callable, Optional import pytest from cvat_sdk.api_client import exceptions, models @@ -84,7 +85,7 @@ def create_gt_job(self, user, task_id): def find_sandbox_task(self, tasks, jobs, users, is_task_staff): def _find( is_staff: bool, *, has_gt_jobs: Optional[bool] = None - ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + ) -> tuple[dict[str, Any], dict[str, Any]]: task = next( t for t in tasks @@ -116,7 +117,7 @@ def find_sandbox_task_without_gt(self, find_sandbox_task): def find_org_task(self, tasks, jobs, users, is_org_member, is_task_staff): def _find( is_staff: bool, user_org_role: str, *, has_gt_jobs: Optional[bool] = None - ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + ) -> tuple[dict[str, Any], dict[str, Any]]: for user in users: if user["is_superuser"]: continue @@ -249,7 +250,7 @@ def test_user_list_reports_in_org_task( @pytest.mark.usefixtures("restore_db_per_class") class TestGetQualityReports(_PermissionTestBase): def _test_get_report_200( - self, user: str, obj_id: int, *, expected_data: Optional[Dict[str, Any]] = None, **kwargs + self, user: str, obj_id: int, *, expected_data: Optional[dict[str, Any]] = None, **kwargs ): with make_api_client(user) as api_client: (_, response) = api_client.quality_api.retrieve_report(obj_id, **kwargs) @@ -308,7 +309,7 @@ def test_user_get_report_in_org_task( @pytest.mark.usefixtures("restore_db_per_class") class TestGetQualityReportData(_PermissionTestBase): def _test_get_report_data_200( - self, user: str, obj_id: int, *, expected_data: Optional[Dict[str, Any]] = None, **kwargs + self, user: str, obj_id: int, *, expected_data: Optional[dict[str, Any]] = None, **kwargs ): with make_api_client(user) as api_client: (_, response) = api_client.quality_api.retrieve_report_data(obj_id, **kwargs) @@ -603,7 +604,7 @@ def _test_check_status_of_report_creation_by_non_rq_job_owner( def test_non_rq_job_owner_cannot_check_status_of_report_creation_in_sandbox( self, - find_sandbox_task_without_gt: Callable[[bool], Tuple[Dict[str, Any], Dict[str, Any]]], + find_sandbox_task_without_gt: Callable[[bool], tuple[dict[str, Any], dict[str, Any]]], admin_user: str, users: Iterable, ): @@ -630,8 +631,8 @@ def test_non_rq_job_owner_cannot_check_status_of_report_creation_in_org( self, role: str, admin_user: str, - find_org_task_without_gt: Callable[[bool, str], Tuple[Dict[str, Any], Dict[str, Any]]], - find_users: Callable[..., List[Dict[str, Any]]], + find_org_task_without_gt: Callable[[bool, str], tuple[dict[str, Any], dict[str, Any]]], + find_users: Callable[..., list[dict[str, Any]]], ): task, task_staff = find_org_task_without_gt(is_staff=True, user_org_role="supervisor") @@ -657,8 +658,8 @@ def test_admin_can_check_status_of_report_creation( is_sandbox: bool, users: Iterable, admin_user: str, - find_org_task_without_gt: Callable[[bool, str], Tuple[Dict[str, Any], Dict[str, Any]]], - find_sandbox_task_without_gt: Callable[[bool], Tuple[Dict[str, Any], Dict[str, Any]]], + find_org_task_without_gt: Callable[[bool, str], tuple[dict[str, Any], dict[str, Any]]], + find_sandbox_task_without_gt: Callable[[bool], tuple[dict[str, Any], dict[str, Any]]], ): if is_sandbox: task, task_staff = find_sandbox_task_without_gt(is_staff=True) @@ -696,7 +697,7 @@ def setup(self, restore_db_per_class, admin_user, quality_reports, jobs, tasks): def _get_endpoint(self, api_client: ApiClient) -> Endpoint: return api_client.quality_api.list_reports_endpoint - def _get_field_samples(self, field: str) -> Tuple[Any, List[Dict[str, Any]]]: + def 
_get_field_samples(self, field: str) -> tuple[Any, list[dict[str, Any]]]: if field == "task_id": # This filter includes both the task and nested job reports task_id, task_reports = super()._get_field_samples(field) @@ -819,7 +820,7 @@ def setup( def _get_endpoint(self, api_client: ApiClient) -> Endpoint: return api_client.quality_api.list_conflicts_endpoint - def _get_field_samples(self, field: str) -> Tuple[Any, List[Dict[str, Any]]]: + def _get_field_samples(self, field: str) -> tuple[Any, list[dict[str, Any]]]: if field == "job_id": # This field is not included in the response job_id = self._find_valid_field_value(self.report_samples, field_path=["job_id"]) @@ -889,7 +890,7 @@ def test_can_use_simple_filter_for_object_list(self, field): @pytest.mark.usefixtures("restore_db_per_class") class TestListSettings(_PermissionTestBase): def _test_list_settings_200( - self, user: str, task_id: int, *, expected_data: Optional[Dict[str, Any]] = None, **kwargs + self, user: str, task_id: int, *, expected_data: Optional[dict[str, Any]] = None, **kwargs ): with make_api_client(user) as api_client: actual = get_paginated_collection( @@ -951,7 +952,7 @@ def test_user_list_settings_in_org_task( @pytest.mark.usefixtures("restore_db_per_class") class TestGetSettings(_PermissionTestBase): def _test_get_settings_200( - self, user: str, obj_id: int, *, expected_data: Optional[Dict[str, Any]] = None, **kwargs + self, user: str, obj_id: int, *, expected_data: Optional[dict[str, Any]] = None, **kwargs ): with make_api_client(user) as api_client: (_, response) = api_client.quality_api.retrieve_settings(obj_id, **kwargs) @@ -1016,9 +1017,9 @@ def _test_patch_settings_200( self, user: str, obj_id: int, - data: Dict[str, Any], + data: dict[str, Any], *, - expected_data: Optional[Dict[str, Any]] = None, + expected_data: Optional[dict[str, Any]] = None, **kwargs, ): with make_api_client(user) as api_client: @@ -1032,7 +1033,7 @@ def _test_patch_settings_200( return response - def _test_patch_settings_403(self, user: str, obj_id: int, data: Dict[str, Any], **kwargs): + def _test_patch_settings_403(self, user: str, obj_id: int, data: dict[str, Any], **kwargs): with make_api_client(user) as api_client: (_, response) = api_client.quality_api.partial_update_settings( obj_id, @@ -1045,7 +1046,7 @@ def _test_patch_settings_403(self, user: str, obj_id: int, data: Dict[str, Any], return response - def _get_request_data(self, data: Dict[str, Any]) -> Tuple[Dict[str, Any], Dict[str, Any]]: + def _get_request_data(self, data: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]: patched_data = deepcopy(data) for field, value in data.items(): @@ -1211,6 +1212,8 @@ def test_modified_task_produces_different_metrics( "oks_sigma", "compare_line_orientation", "panoptic_comparison", + "point_size_base", + "match_empty_frames", ], ) def test_settings_affect_metrics( @@ -1228,6 +1231,12 @@ def test_settings_affect_metrics( settings[parameter] = 1 - settings[parameter] if parameter == "group_match_threshold": settings[parameter] = 0.9 + elif parameter == "point_size_base": + settings[parameter] = next( + v + for v in models.PointSizeBaseEnum.allowed_values[("value",)].values() + if v != settings[parameter] + ) else: assert False @@ -1237,7 +1246,12 @@ def test_settings_affect_metrics( ) new_report = self.create_quality_report(admin_user, task_id) - assert new_report["summary"]["conflict_count"] != old_report["summary"]["conflict_count"] + if parameter == "match_empty_frames": + assert new_report["summary"]["valid_count"] != 
old_report["summary"]["valid_count"] + else: + assert ( + new_report["summary"]["conflict_count"] != old_report["summary"]["conflict_count"] + ) def test_old_report_can_be_loaded(self, admin_user, quality_reports): report = min((r for r in quality_reports if r["task_id"]), key=lambda r: r["id"]) diff --git a/tests/python/rest_api/test_remote_url.py b/tests/python/rest_api/test_remote_url.py index cc50a3284d58..c230aa3a9584 100644 --- a/tests/python/rest_api/test_remote_url.py +++ b/tests/python/rest_api/test_remote_url.py @@ -5,7 +5,7 @@ from http import HTTPStatus from time import sleep -from typing import Any, Dict +from typing import Any import pytest @@ -21,7 +21,7 @@ def _post_task_remote_data(username, task_id, resources): return post_method(username, f"tasks/{task_id}/data", data) -def _wait_until_task_is_created(username: str, rq_id: str) -> Dict[str, Any]: +def _wait_until_task_is_created(username: str, rq_id: str) -> dict[str, Any]: url = f"requests/{rq_id}" for _ in range(100): diff --git a/tests/python/rest_api/test_requests.py b/tests/python/rest_api/test_requests.py index f06e97ae7fba..a6f808f73056 100644 --- a/tests/python/rest_api/test_requests.py +++ b/tests/python/rest_api/test_requests.py @@ -4,7 +4,6 @@ import io from http import HTTPStatus -from typing import List from urllib.parse import urlparse import pytest @@ -29,6 +28,7 @@ @pytest.mark.usefixtures("restore_db_per_class") @pytest.mark.usefixtures("restore_redis_inmem_per_function") +@pytest.mark.usefixtures("restore_redis_ondisk_per_function") @pytest.mark.timeout(30) class TestRequestsListFilters(CollectionSimpleFilterTestBase): @@ -87,7 +87,7 @@ def fxt_make_requests( fxt_make_export_job_requests, fxt_download_file, ): - def _make_requests(project_ids: List[int], task_ids: List[int], job_ids: List[int]): + def _make_requests(project_ids: list[int], task_ids: list[int], job_ids: list[int]): # make requests to export projects|tasks|jobs annotations|datasets|backups fxt_make_export_project_requests(project_ids[1:]) fxt_make_export_task_requests(task_ids[1:]) @@ -161,7 +161,7 @@ def download_file(resource: str, rid: int, subresource: str): @pytest.fixture def fxt_make_export_project_requests(self): - def make_requests(project_ids: List[int]): + def make_requests(project_ids: list[int]): for project_id in project_ids: export_project_backup( self.user, api_version=2, id=project_id, download_result=False @@ -181,7 +181,7 @@ def make_requests(project_ids: List[int]): @pytest.fixture def fxt_make_export_task_requests(self): - def make_requests(task_ids: List[int]): + def make_requests(task_ids: list[int]): for task_id in task_ids: export_task_backup(self.user, api_version=2, id=task_id, download_result=False) export_task_dataset( @@ -195,7 +195,7 @@ def make_requests(task_ids: List[int]): @pytest.fixture def fxt_make_export_job_requests(self): - def make_requests(job_ids: List[int]): + def make_requests(job_ids: list[int]): for job_id in job_ids: export_job_dataset( self.user, @@ -230,7 +230,7 @@ def make_requests(job_ids: List[int]): ], ) def test_can_use_simple_filter_for_object_list( - self, simple_filter: str, values: List, fxt_resources_ids, fxt_make_requests + self, simple_filter: str, values: list, fxt_resources_ids, fxt_make_requests ): project_ids, task_ids, job_ids = fxt_resources_ids fxt_make_requests(project_ids, task_ids, job_ids) diff --git a/tests/python/rest_api/test_tasks.py b/tests/python/rest_api/test_tasks.py index c57dec13f639..15496cc31f73 100644 --- a/tests/python/rest_api/test_tasks.py +++ 
b/tests/python/rest_api/test_tasks.py @@ -14,6 +14,7 @@ import zipfile from abc import ABCMeta, abstractmethod from collections import Counter +from collections.abc import Generator, Iterable, Sequence from contextlib import closing from copy import deepcopy from datetime import datetime @@ -23,23 +24,10 @@ from itertools import chain, groupby, product from math import ceil from operator import itemgetter -from pathlib import Path +from pathlib import Path, PurePosixPath from tempfile import NamedTemporaryFile, TemporaryDirectory from time import sleep, time -from typing import ( - Any, - Callable, - ClassVar, - Dict, - Generator, - Iterable, - List, - Optional, - Sequence, - Set, - Tuple, - Union, -) +from typing import Any, Callable, ClassVar, Optional, Union import attrs import numpy as np @@ -76,9 +64,12 @@ ) from .utils import ( + DATUMARO_FORMAT_FOR_DIMENSION, CollectionSimpleFilterTestBase, + calc_end_frame, compare_annotations, create_task, + export_dataset, export_task_backup, export_task_dataset, parse_frame_step, @@ -94,6 +85,15 @@ def get_cloud_storage_content(username: str, cloud_storage_id: int, manifest: Op return [f"{f['name']}{'/' if str(f['type']) == 'DIR' else ''}" for f in data["content"]] +def count_frame_uses(data: Sequence[int], *, included_frames: Sequence[int]) -> dict[int, int]: + use_counts = {f: 0 for f in included_frames} + for f in data: + if f in included_frames: + use_counts[f] += 1 + + return use_counts + + @pytest.mark.usefixtures("restore_db_per_class") class TestGetTasks: def _test_task_list_200(self, user, project_id, data, exclude_paths="", **kwargs): @@ -141,7 +141,6 @@ def _test_assigned_users_to_see_task_data(self, tasks, users, is_task_staff, **k "groups, is_staff, is_allow", [ ("admin", False, True), - ("business", False, False), ], ) def test_project_tasks_visibility( @@ -350,7 +349,6 @@ def _test_users_to_create_task_in_project( "groups, is_staff, is_allow", [ ("admin", False, True), - ("business", False, False), ("user", True, True), ], ) @@ -511,8 +509,6 @@ def get_data(tid): [ ("admin", True, True), ("admin", False, True), - ("business", True, True), - ("business", False, False), ("worker", True, True), ("worker", False, False), ("user", True, True), @@ -746,7 +742,7 @@ def _test_can_export_dataset( username: str, task_id: int, *, - api_version: Union[int, Tuple[int]], + api_version: Union[int, tuple[int]], local_download: bool = True, **kwargs, ) -> Optional[bytes]: @@ -768,7 +764,7 @@ def test_can_export_task_dataset_locally_and_to_cloud_with_both_api_versions( admin_user, tasks_with_shapes, filter_tasks, - api_version: Tuple[int], + api_version: tuple[int], local_download: bool, ): filter_ = "target_storage__location" @@ -904,6 +900,7 @@ def test_can_export_task_to_coco_format(self, admin_user: str, tid: int, api_ver @pytest.mark.parametrize("api_version", (1, 2)) @pytest.mark.usefixtures("restore_db_per_function") + @pytest.mark.usefixtures("restore_redis_ondisk_per_function") def test_can_download_task_with_special_chars_in_name(self, admin_user: str, api_version: int): # Control characters in filenames may conflict with the Content-Disposition header # value restrictions, as it needs to include the downloaded file name. 
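The `count_frame_uses` helper added at the top of `test_tasks.py` tallies how often each validation frame occurs in a frame sequence; the honeypot tests below use it to check how validation frames are distributed across jobs. A small illustrative call (all frame numbers are made up):

```python
# Frames outside included_frames are ignored; every included frame gets a
# count, even if it is never used.
frame_sequence = [0, 7, 1, 9, 2, 7]  # e.g. honeypot frames mixed into job frames
validation_frames = [7, 9, 11]       # the task's validation (honeypot) frames

assert count_frame_uses(frame_sequence, included_frames=validation_frames) == {
    7: 2,
    9: 1,
    11: 0,
}
```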
@@ -985,11 +982,52 @@ def test_uses_subset_name( subset_path in path for path in zip_file.namelist() ), f"No {subset_path} in {zip_file.namelist()}" + @pytest.mark.parametrize( + "dimension, mode", [("2d", "annotation"), ("2d", "interpolation"), ("3d", "annotation")] + ) + def test_datumaro_export_without_annotations_includes_image_info( + self, admin_user, tasks, mode, dimension + ): + task = next( + t for t in tasks if t.get("size") if t["mode"] == mode if t["dimension"] == dimension + ) + + with make_api_client(admin_user) as api_client: + dataset_file = io.BytesIO( + export_dataset( + api_client.tasks_api, + api_version=2, + id=task["id"], + format=DATUMARO_FORMAT_FOR_DIMENSION[dimension], + save_images=False, + ) + ) + + with zipfile.ZipFile(dataset_file) as zip_file: + annotations = json.loads(zip_file.read("annotations/default.json")) + + assert annotations["items"] + for item in annotations["items"]: + assert "media" not in item + + if dimension == "2d": + assert osp.splitext(item["image"]["path"])[0] == item["id"] + assert not Path(item["image"]["path"]).is_absolute() + assert tuple(item["image"]["size"]) > (0, 0) + elif dimension == "3d": + assert osp.splitext(osp.basename(item["point_cloud"]["path"]))[0] == item["id"] + assert not Path(item["point_cloud"]["path"]).is_absolute() + for related_image in item["related_images"]: + assert not Path(related_image["path"]).is_absolute() + if "size" in related_image: + assert tuple(related_image["size"]) > (0, 0) + @pytest.mark.usefixtures("restore_db_per_function") @pytest.mark.usefixtures("restore_cvat_data_per_function") @pytest.mark.usefixtures("restore_redis_ondisk_per_function") @pytest.mark.usefixtures("restore_redis_ondisk_after_class") +@pytest.mark.usefixtures("restore_redis_inmem_per_function") class TestPostTaskData: _USERNAME = "admin1" @@ -1527,17 +1565,17 @@ def _create_task_with_cloud_data( request, cloud_storage: Any, use_manifest: bool, - server_files: List[str], + server_files: list[str], use_cache: bool = True, sorting_method: str = "lexicographical", data_type: str = "image", video_frame_count: int = 10, - server_files_exclude: Optional[List[str]] = None, + server_files_exclude: Optional[list[str]] = None, org: str = "", - filenames: Optional[List[str]] = None, - task_spec_kwargs: Optional[Dict[str, Any]] = None, - data_spec_kwargs: Optional[Dict[str, Any]] = None, - ) -> Tuple[int, Any]: + filenames: Optional[list[str]] = None, + task_spec_kwargs: Optional[dict[str, Any]] = None, + data_spec_kwargs: Optional[dict[str, Any]] = None, + ) -> tuple[int, Any]: s3_client = s3.make_client(bucket=cloud_storage["resource"]) if data_type == "video": video = generate_video_file(video_frame_count) @@ -1643,8 +1681,8 @@ def test_create_task_with_cloud_storage_directories_and_excluded_files( cloud_storage_id: int, use_cache: bool, use_manifest: bool, - server_files: List[str], - server_files_exclude: Optional[List[str]], + server_files: list[str], + server_files_exclude: Optional[list[str]], task_size: int, org: str, cloud_storages, @@ -1690,8 +1728,8 @@ def test_create_task_with_cloud_storage_directories_and_predefined_sorting( self, cloud_storage_id: int, use_manifest: bool, - server_files: List[str], - expected_result: List[str], + server_files: list[str], + expected_result: list[str], org: str, cloud_storages, request, @@ -1933,7 +1971,7 @@ def test_create_task_with_cloud_storage_and_retrieve_data( ) def test_create_task_with_cloud_storage_and_check_data_sorting( self, - filenames: List[str], + filenames: list[str], 
sorting_method: str, cloud_storage_id: int, org: str, @@ -2024,7 +2062,7 @@ def test_can_specify_file_job_mapping(self): ) with make_api_client(self._USERNAME) as api_client: - jobs: List[models.JobRead] = get_paginated_collection( + jobs: list[models.JobRead] = get_paginated_collection( api_client.jobs_api.list_endpoint, task_id=task_id, sort="id" ) (task_meta, _) = api_client.tasks_api.retrieve_data_meta(id=task_id) @@ -2088,7 +2126,7 @@ def test_create_task_with_cloud_storage_directories_and_default_bucket_prefix( self, cloud_storage_id: int, use_manifest: bool, - server_files: List[str], + server_files: list[str], default_prefix: str, expected_task_size: int, org: str, @@ -2132,7 +2170,7 @@ def test_can_create_task_with_honeypots( self, fxt_test_name, frame_selection_method: str, - method_params: Set[str], + method_params: set[str], per_job_count_param: str, ): base_segment_size = 4 @@ -2237,6 +2275,15 @@ def test_can_create_task_with_honeypots( validation_frames ) + if frame_selection_method == "random_uniform": + # Test distribution + validation_frame_counts = { + f: annotation_job_frame_counts.get(f, 0) + 1 for f in validation_frames + } + assert max(validation_frame_counts.values()) <= 1 + min( + validation_frame_counts.values() + ) + # each job must have the specified number of validation frames for job_meta in annotation_job_metas: assert ( @@ -2316,7 +2363,7 @@ def test_can_create_task_with_gt_job_from_images( self, request: pytest.FixtureRequest, frame_selection_method: str, - method_params: Set[str], + method_params: set[str], ): segment_size = 4 total_frame_count = 15 @@ -2449,7 +2496,7 @@ def test_can_create_task_with_gt_job_from_video( self, request: pytest.FixtureRequest, frame_selection_method: str, - method_params: Set[str], + method_params: set[str], ): segment_size = 4 total_frame_count = 15 @@ -2650,8 +2697,8 @@ def read_frame(self, i: int) -> Image.Image: ... 
@attrs.define class _TaskSpecBase(_TaskSpec): - _params: Union[Dict, models.TaskWriteRequest] - _data_params: Union[Dict, models.DataRequest] + _params: Union[dict, models.TaskWriteRequest] + _data_params: Union[dict, models.DataRequest] size: int = attrs.field(kw_only=True) @property @@ -2699,8 +2746,9 @@ def read_frame(self, i: int) -> Image.Image: @pytest.mark.usefixtures("restore_db_per_class") @pytest.mark.usefixtures("restore_cvat_data_per_class") -@pytest.mark.usefixtures("restore_redis_ondisk_per_class") +@pytest.mark.usefixtures("restore_redis_ondisk_per_function") @pytest.mark.usefixtures("restore_redis_ondisk_after_class") +@pytest.mark.usefixtures("restore_redis_inmem_per_function") class TestTaskData: _USERNAME = "admin1" @@ -2715,7 +2763,7 @@ def _uploaded_images_task_fxt_base( step: Optional[int] = None, segment_size: Optional[int] = None, **data_kwargs, - ) -> Generator[Tuple[_ImagesTaskSpec, int], None, None]: + ) -> Generator[tuple[_ImagesTaskSpec, int], None, None]: task_params = { "name": f"{request.node.name}[{request.fixturename}]", "labels": [{"name": "a"}], @@ -2768,13 +2816,13 @@ def get_frame(i: int) -> bytes: @pytest.fixture(scope="class") def fxt_uploaded_images_task( self, request: pytest.FixtureRequest - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: yield from self._uploaded_images_task_fxt_base(request=request) @pytest.fixture(scope="class") def fxt_uploaded_images_task_with_segments( self, request: pytest.FixtureRequest - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: yield from self._uploaded_images_task_fxt_base(request=request, segment_size=4) @fixture(scope="class") @@ -2783,7 +2831,7 @@ def fxt_uploaded_images_task_with_segments( @parametrize("start_frame", [3, 7]) def fxt_uploaded_images_task_with_segments_start_stop_step( self, request: pytest.FixtureRequest, start_frame: int, stop_frame: Optional[int], step: int - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: yield from self._uploaded_images_task_fxt_base( request=request, frame_count=30, @@ -2800,7 +2848,7 @@ def _uploaded_images_task_with_honeypots_and_segments_base( start_frame: Optional[int] = None, step: Optional[int] = None, random_seed: int = 42, - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: validation_params = models.DataRequestValidationParams._from_openapi_data( mode="gt_pool", frame_selection_method="random_uniform", @@ -2862,14 +2910,14 @@ def _uploaded_images_task_with_honeypots_and_segments_base( @fixture(scope="class") def fxt_uploaded_images_task_with_honeypots_and_segments( self, request: pytest.FixtureRequest - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: yield from self._uploaded_images_task_with_honeypots_and_segments_base(request) @fixture(scope="class") @parametrize("start_frame, step", [(2, 3)]) def fxt_uploaded_images_task_with_honeypots_and_segments_start_step( self, request: pytest.FixtureRequest, start_frame: Optional[int], step: Optional[int] - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: yield from self._uploaded_images_task_with_honeypots_and_segments_base( request, start_frame=start_frame, step=step ) @@ -2878,7 +2926,7 @@ def fxt_uploaded_images_task_with_honeypots_and_segments_start_step( @parametrize("random_seed", [1, 2, 
5]) def fxt_uploaded_images_task_with_honeypots_and_changed_real_frames( self, request: pytest.FixtureRequest, random_seed: int - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: with closing( self._uploaded_images_task_with_honeypots_and_segments_base( request, start_frame=2, step=3, random_seed=random_seed @@ -2919,7 +2967,7 @@ def _uploaded_images_task_with_gt_and_segments_base( start_frame: Optional[int] = None, step: Optional[int] = None, frame_selection_method: str = "random_uniform", - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: used_frames_count = 16 total_frame_count = (start_frame or 0) + used_frames_count * (step or 1) segment_size = 5 @@ -2979,7 +3027,7 @@ def fxt_uploaded_images_task_with_gt_and_segments_start_step( start_frame: Optional[int], step: Optional[int], frame_selection_method: str, - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: yield from self._uploaded_images_task_with_gt_and_segments_base( request, start_frame=start_frame, @@ -2996,7 +3044,7 @@ def _uploaded_video_task_fxt_base( start_frame: Optional[int] = None, stop_frame: Optional[int] = None, step: Optional[int] = None, - ) -> Generator[Tuple[_VideoTaskSpec, int], None, None]: + ) -> Generator[tuple[_VideoTaskSpec, int], None, None]: task_params = { "name": f"{request.node.name}[{request.fixturename}]", "labels": [{"name": "a"}], @@ -3040,13 +3088,13 @@ def get_video_file() -> io.BytesIO: def fxt_uploaded_video_task( self, request: pytest.FixtureRequest, - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: yield from self._uploaded_video_task_fxt_base(request=request) @pytest.fixture(scope="class") def fxt_uploaded_video_task_with_segments( self, request: pytest.FixtureRequest - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: yield from self._uploaded_video_task_fxt_base(request=request, segment_size=4) @fixture(scope="class") @@ -3055,7 +3103,7 @@ def fxt_uploaded_video_task_with_segments( @parametrize("start_frame", [3, 7]) def fxt_uploaded_video_task_with_segments_start_stop_step( self, request: pytest.FixtureRequest, start_frame: int, stop_frame: Optional[int], step: int - ) -> Generator[Tuple[_TaskSpec, int], None, None]: + ) -> Generator[tuple[_TaskSpec, int], None, None]: yield from self._uploaded_video_task_fxt_base( request=request, frame_count=30, @@ -3065,7 +3113,7 @@ def fxt_uploaded_video_task_with_segments_start_stop_step( step=step, ) - def _compute_annotation_segment_params(self, task_spec: _TaskSpec) -> List[Tuple[int, int]]: + def _compute_annotation_segment_params(self, task_spec: _TaskSpec) -> list[tuple[int, int]]: segment_params = [] frame_step = task_spec.frame_step segment_size = getattr(task_spec, "segment_size", 0) or task_spec.size * frame_step @@ -3073,7 +3121,7 @@ def _compute_annotation_segment_params(self, task_spec: _TaskSpec) -> List[Tuple stop_frame = getattr(task_spec, "stop_frame", None) or ( start_frame + (task_spec.size - 1) * frame_step ) - end_frame = stop_frame - ((stop_frame - start_frame) % frame_step) + frame_step + end_frame = calc_end_frame(start_frame, stop_frame, frame_step) validation_params = getattr(task_spec, "validation_params", None) if validation_params and validation_params.mode.value == "gt_pool": @@ -3589,7 +3637,7 @@ def get_expected_chunk_abs_frame_ids(chunk_id: int): 
@pytest.mark.usefixtures("restore_db_per_function") class TestPatchTaskLabel: - def _get_task_labels(self, pid, user, **kwargs) -> List[models.Label]: + def _get_task_labels(self, pid, user, **kwargs) -> list[models.Label]: kwargs.setdefault("return_json", True) with make_api_client(user) as api_client: return get_paginated_collection( @@ -3790,6 +3838,7 @@ def test_admin_can_add_skeleton(self, tasks, admin_user): @pytest.mark.usefixtures("restore_db_per_function") @pytest.mark.usefixtures("restore_cvat_data_per_function") @pytest.mark.usefixtures("restore_redis_ondisk_per_function") +@pytest.mark.usefixtures("restore_redis_inmem_per_function") class TestWorkWithTask: _USERNAME = "admin1" @@ -3863,7 +3912,7 @@ def setup( "local_download", (True, pytest.param(False, marks=pytest.mark.with_external_services)) ) def test_can_export_backup_with_both_api_versions( - self, filter_tasks, api_version: Tuple[int], local_download: bool + self, filter_tasks, api_version: tuple[int], local_download: bool ): task = filter_tasks( **{("exclude_" if local_download else "") + "target_storage__location": "cloud_storage"} @@ -4046,7 +4095,7 @@ class TestWorkWithSimpleGtJobTasks: @fixture def fxt_task_with_gt_job( self, tasks, jobs, job_has_annotations - ) -> Generator[Dict[str, Any], None, None]: + ) -> Generator[dict[str, Any], None, None]: gt_job = next( j for j in jobs @@ -4168,7 +4217,7 @@ class TestWorkWithHoneypotTasks: @fixture def fxt_task_with_honeypots( self, tasks, jobs, job_has_annotations - ) -> Generator[Dict[str, Any], None, None]: + ) -> Generator[dict[str, Any], None, None]: gt_job = next( j for j in jobs @@ -4412,6 +4461,15 @@ def test_can_change_honeypot_frames_in_task( api_client.tasks_api.retrieve_validation_layout(task["id"])[1].data ) + api_client.tasks_api.partial_update_validation_layout( + task["id"], + patched_task_validation_layout_write_request=models.PatchedTaskValidationLayoutWriteRequest( + frame_selection_method="manual", + honeypot_real_frames=old_validation_layout["honeypot_count"] + * [gt_frame_set[0]], + ), + ) + params = {"frame_selection_method": frame_selection_method} if frame_selection_method == "manual": @@ -4438,6 +4496,15 @@ def test_can_change_honeypot_frames_in_task( if frame_selection_method == "manual": assert new_honeypot_real_frames == requested_honeypot_real_frames + elif frame_selection_method == "random_uniform": + # Test distribution + validation_frame_counts = count_frame_uses( + new_honeypot_real_frames, + included_frames=new_validation_layout["validation_frames"], + ) + assert max(validation_frame_counts.values()) <= 1 + min( + validation_frame_counts.values() + ) assert ( DeepDiff( @@ -4465,10 +4532,13 @@ def test_can_change_honeypot_frames_in_task_can_only_select_from_active_validati gt_frame_set = range(gt_job["start_frame"], gt_job["stop_frame"] + 1) active_gt_set = gt_frame_set[:honeypots_per_job] - api_client.jobs_api.partial_update_data_meta( - gt_job["id"], - patched_job_data_meta_write_request=models.PatchedJobDataMetaWriteRequest( - deleted_frames=[f for f in gt_frame_set if f not in active_gt_set] + api_client.tasks_api.partial_update_validation_layout( + task["id"], + patched_task_validation_layout_write_request=models.PatchedTaskValidationLayoutWriteRequest( + disabled_frames=[f for f in gt_frame_set if f not in active_gt_set], + frame_selection_method="manual", + honeypot_real_frames=old_validation_layout["honeypot_count"] + * [active_gt_set[0]], ), ) @@ -4511,7 +4581,7 @@ def 
test_can_change_honeypot_frames_in_task_can_only_select_from_active_validati new_honeypot_real_frames = new_validation_layout["honeypot_real_frames"] assert old_validation_layout["honeypot_count"] == len(new_honeypot_real_frames) - assert all(f in active_gt_set for f in new_honeypot_real_frames) + assert all([f in active_gt_set for f in new_honeypot_real_frames]) if frame_selection_method == "manual": assert new_honeypot_real_frames == requested_honeypot_real_frames @@ -4530,11 +4600,104 @@ def test_can_change_honeypot_frames_in_task_can_only_select_from_active_validati ] ), new_honeypot_real_frames + # Test distribution + validation_frame_counts = count_frame_uses( + new_honeypot_real_frames, included_frames=active_gt_set + ) + assert max(validation_frame_counts.values()) <= 1 + min( + validation_frame_counts.values() + ) + + @parametrize("task, gt_job, annotation_jobs", [fixture_ref(fxt_task_with_honeypots)]) + @parametrize("frame_selection_method", ["manual", "random_uniform"]) + def test_can_restore_and_change_honeypot_frames_in_task_in_the_same_request( + self, admin_user, task, gt_job, annotation_jobs, frame_selection_method: str + ): + assert gt_job["stop_frame"] - gt_job["start_frame"] + 1 >= 2 + + with make_api_client(admin_user) as api_client: + old_validation_layout = json.loads( + api_client.tasks_api.retrieve_validation_layout(task["id"])[1].data + ) + + honeypots_per_job = old_validation_layout["frames_per_job_count"] + + gt_frame_set = range(gt_job["start_frame"], gt_job["stop_frame"] + 1) + active_gt_set = gt_frame_set[:honeypots_per_job] + + api_client.tasks_api.partial_update_validation_layout( + task["id"], + patched_task_validation_layout_write_request=models.PatchedTaskValidationLayoutWriteRequest( + disabled_frames=[f for f in gt_frame_set if f not in active_gt_set], + frame_selection_method="manual", + honeypot_real_frames=old_validation_layout["honeypot_count"] + * [active_gt_set[0]], + ), + ) + + active_gt_set = gt_frame_set + + params = { + "frame_selection_method": frame_selection_method, + "disabled_frames": [], # restore all validation frames + } + + if frame_selection_method == "manual": + requested_honeypot_real_frames = [ + active_gt_set[(old_real_frame + 1) % len(active_gt_set)] + for old_real_frame in old_validation_layout["honeypot_real_frames"] + ] + + params["honeypot_real_frames"] = requested_honeypot_real_frames + + new_validation_layout = json.loads( + api_client.tasks_api.partial_update_validation_layout( + task["id"], + patched_task_validation_layout_write_request=( + models.PatchedTaskValidationLayoutWriteRequest(**params) + ), + )[1].data + ) + + new_honeypot_real_frames = new_validation_layout["honeypot_real_frames"] + + assert old_validation_layout["honeypot_count"] == len(new_honeypot_real_frames) + assert sorted(new_validation_layout["disabled_frames"]) == sorted( + params["disabled_frames"] + ) + + if frame_selection_method == "manual": + assert new_honeypot_real_frames == requested_honeypot_real_frames + else: + assert all( + [ + honeypots_per_job + == len( + set( + new_honeypot_real_frames[ + j * honeypots_per_job : (j + 1) * honeypots_per_job + ] + ) + ) + for j in range(len(annotation_jobs)) + ] + ), new_honeypot_real_frames + + # Test distribution + validation_frame_counts = count_frame_uses( + new_honeypot_real_frames, included_frames=active_gt_set + ) + assert max(validation_frame_counts.values()) <= 1 + min( + validation_frame_counts.values() + ) + + @parametrize("task, gt_job, annotation_jobs", [fixture_ref(fxt_task_with_honeypots)]) 
@parametrize("frame_selection_method", ["manual", "random_uniform"]) def test_can_change_honeypot_frames_in_annotation_jobs( self, admin_user, task, gt_job, annotation_jobs, frame_selection_method: str ): + _MAX_RANDOM_ATTEMPTS = 20 # This test can have random outcomes, it's expected + assert gt_job["stop_frame"] - gt_job["start_frame"] + 1 >= 2 with make_api_client(admin_user) as api_client: @@ -4556,16 +4719,34 @@ def test_can_change_honeypot_frames_in_annotation_jobs( params["honeypot_real_frames"] = requested_honeypot_real_frames - new_validation_layout = json.loads( - api_client.jobs_api.partial_update_validation_layout( - annotation_job["id"], - patched_job_validation_layout_write_request=( - models.PatchedJobValidationLayoutWriteRequest(**params) - ), - )[1].data - ) + attempt = 0 + while attempt < _MAX_RANDOM_ATTEMPTS: + new_validation_layout = json.loads( + api_client.jobs_api.partial_update_validation_layout( + annotation_job["id"], + patched_job_validation_layout_write_request=( + models.PatchedJobValidationLayoutWriteRequest(**params) + ), + )[1].data + ) - new_honeypot_real_frames = new_validation_layout["honeypot_real_frames"] + new_honeypot_real_frames = new_validation_layout["honeypot_real_frames"] + + if ( + frame_selection_method == "random_uniform" + and new_honeypot_real_frames + == old_validation_layout["honeypot_real_frames"] + ): + attempt += 1 + # The test is fully random, it's possible to get no changes in the updated + # honeypots. Passing a random seed has little sense in this endpoint, + # so we retry several times in such a case instead. + else: + break + + if attempt >= _MAX_RANDOM_ATTEMPTS and frame_selection_method == "random_uniform": + # The situation is unlikely if everything works, so we consider it a fail + pytest.fail(f"too many attempts ({attempt}) with random honeypot updating") assert old_validation_layout["honeypot_count"] == len(new_honeypot_real_frames) assert all(f in gt_frame_set for f in new_honeypot_real_frames) @@ -4650,7 +4831,7 @@ def test_task_unassigned_cannot_see_task_preview( self._test_assigned_users_cannot_see_task_preview(tasks, users, is_task_staff) -@pytest.mark.usefixtures("restore_redis_ondisk_per_class") +@pytest.mark.usefixtures("restore_redis_ondisk_per_function") @pytest.mark.usefixtures("restore_redis_ondisk_after_class") class TestUnequalJobs: @pytest.fixture(autouse=True) @@ -5177,6 +5358,47 @@ def test_import_annotations_after_deleting_related_cloud_storage( task.import_annotations(self.import_format, file_path) self._check_annotations(task_id) + @pytest.mark.parametrize("dimension", ["2d", "3d"]) + def test_can_import_datumaro_json(self, admin_user, tasks, dimension): + task = next( + t + for t in tasks + if t.get("size") + if t["dimension"] == dimension and t.get("validation_mode") != "gt_pool" + ) + + with make_api_client(admin_user) as api_client: + original_annotations = json.loads( + api_client.tasks_api.retrieve_annotations(task["id"])[1].data + ) + + dataset_archive = io.BytesIO( + export_dataset( + api_client.tasks_api, + api_version=2, + id=task["id"], + format=DATUMARO_FORMAT_FOR_DIMENSION[dimension], + save_images=False, + ) + ) + + with zipfile.ZipFile(dataset_archive) as zip_file: + annotations = zip_file.read("annotations/default.json") + + with TemporaryDirectory() as tempdir: + annotations_path = Path(tempdir) / "annotations.json" + annotations_path.write_bytes(annotations) + self.client.tasks.retrieve(task["id"]).import_annotations( + DATUMARO_FORMAT_FOR_DIMENSION[dimension], annotations_path + ) + + 
with make_api_client(admin_user) as api_client: + updated_annotations = json.loads( + api_client.tasks_api.retrieve_annotations(task["id"])[1].data + ) + + assert compare_annotations(original_annotations, updated_annotations) == {} + @pytest.mark.parametrize( "format_name", [ @@ -5251,7 +5473,7 @@ def setup_class( cls._init_tasks() @classmethod - def _create_task_with_annotations(cls, filenames: List[str]): + def _create_task_with_annotations(cls, filenames: list[str]): images = generate_image_files(len(filenames), filenames=filenames) source_archive_path = cls.tmp_dir / "source_data.zip" @@ -6240,3 +6462,69 @@ def check_element_outside_count(track_idx, element_idx, expected_count): check_element_outside_count(1, 0, 1) check_element_outside_count(1, 1, 2) check_element_outside_count(1, 2, 2) + + +@pytest.mark.usefixtures("restore_db_per_class") +@pytest.mark.usefixtures("restore_redis_ondisk_per_function") +@pytest.mark.usefixtures("restore_redis_ondisk_after_class") +@pytest.mark.usefixtures("restore_redis_inmem_per_function") +class TestPatchExportFrames(TestTaskData): + + @fixture(scope="class") + @parametrize("media_type", [_SourceDataType.images, _SourceDataType.video]) + @parametrize("step", [5]) + @parametrize("frame_count", [20]) + @parametrize("start_frame", [None, 3]) + def fxt_uploaded_media_task( + self, + request: pytest.FixtureRequest, + media_type: _SourceDataType, + step: int, + frame_count: int, + start_frame: Optional[int], + ) -> Generator[tuple[_TaskSpec, Task, str], None, None]: + args = dict(request=request, frame_count=frame_count, step=step, start_frame=start_frame) + + if media_type == _SourceDataType.images: + (spec, task_id) = next(self._uploaded_images_task_fxt_base(**args)) + else: + (spec, task_id) = next(self._uploaded_video_task_fxt_base(**args)) + + with make_sdk_client(self._USERNAME) as client: + task = client.tasks.retrieve(task_id) + + yield (spec, task, f"CVAT for {media_type} 1.1") + + @pytest.mark.usefixtures("restore_redis_ondisk_per_function") + @parametrize("spec, task, format_name", [fixture_ref(fxt_uploaded_media_task)]) + def test_export_with_non_default_frame_step( + self, tmp_path: Path, spec: _TaskSpec, task: Task, format_name: str + ): + + dataset_file = tmp_path / "dataset.zip" + task.export_dataset(format_name, dataset_file, include_images=True) + + def get_img_index(zinfo: zipfile.ZipInfo) -> int: + name = PurePosixPath(zinfo.filename) + if name.suffix.lower() not in (".png", ".jpg", ".jpeg"): + return -1 + return int(name.stem.rsplit("_", maxsplit=1)[-1]) + + # get frames and sort them + with zipfile.ZipFile(dataset_file) as dataset: + frames = np.array( + [png_idx for png_idx in map(get_img_index, dataset.filelist) if png_idx != -1] + ) + frames.sort() + + task_meta = task.get_meta() + (src_start_frame, src_stop_frame, src_frame_step) = ( + task_meta["start_frame"], + task_meta["stop_frame"], + spec.frame_step, + ) + src_end_frame = calc_end_frame(src_start_frame, src_stop_frame, src_frame_step) + assert len(frames) == spec.size == task_meta["size"], "Some frames were lost" + assert np.all( + frames == np.arange(src_start_frame, src_end_frame, src_frame_step) + ), "Some frames are wrong" diff --git a/tests/python/rest_api/test_webhooks.py b/tests/python/rest_api/test_webhooks.py index 3c528bc78c15..778eda8430ed 100644 --- a/tests/python/rest_api/test_webhooks.py +++ b/tests/python/rest_api/test_webhooks.py @@ -96,7 +96,7 @@ def test_admin_can_create_webhook_for_project_in_org( assert response.status_code == HTTPStatus.CREATED assert 
"secret" not in response.json() - @pytest.mark.parametrize("privilege", ["user", "business"]) + @pytest.mark.parametrize("privilege", ["user"]) def test_sandbox_project_owner_can_create_webhook_for_project(self, privilege, projects, users): users = [user for user in users if privilege in user["groups"]] username, project_id = next( @@ -116,7 +116,7 @@ def test_sandbox_project_owner_can_create_webhook_for_project(self, privilege, p assert response.status_code == HTTPStatus.CREATED assert "secret" not in response.json() - @pytest.mark.parametrize("privilege", ["worker", "user", "business"]) + @pytest.mark.parametrize("privilege", ["worker", "user"]) def test_sandbox_project_assignee_cannot_create_webhook_for_project( self, privilege, projects, users ): @@ -410,7 +410,7 @@ def test_admin_can_get_webhook(self, webhooks, users, projects): assert "secret" not in response.json() assert DeepDiff(webhooks[wid], response.json(), ignore_order=True) == {} - @pytest.mark.parametrize("privilege", ["user", "business"]) + @pytest.mark.parametrize("privilege", ["user"]) def test_project_owner_can_get_webhook(self, privilege, webhooks, projects, users): proj_webhooks = [w for w in webhooks if w["type"] == "project"] username, wid = next( @@ -418,7 +418,7 @@ def test_project_owner_can_get_webhook(self, privilege, webhooks, projects, user (user["username"], webhook["id"]) for user in users for webhook in proj_webhooks - if privilege not in user["groups"] + if privilege in user["groups"] and projects[webhook["project_id"]]["owner"]["id"] == user["id"] ) ) @@ -429,7 +429,7 @@ def test_project_owner_can_get_webhook(self, privilege, webhooks, projects, user assert "secret" not in response.json() assert DeepDiff(webhooks[wid], response.json(), ignore_order=True) == {} - @pytest.mark.parametrize("privilege", ["user", "business"]) + @pytest.mark.parametrize("privilege", ["user"]) def test_webhook_owner_can_get_webhook(self, privilege, webhooks, projects, users): proj_webhooks = [w for w in webhooks if w["type"] == "project"] username, wid = next( @@ -447,7 +447,7 @@ def test_webhook_owner_can_get_webhook(self, privilege, webhooks, projects, user assert "secret" not in response.json() assert DeepDiff(webhooks[wid], response.json(), ignore_order=True) == {} - @pytest.mark.parametrize("privilege", ["user", "business"]) + @pytest.mark.parametrize("privilege", ["user"]) def test_not_project_staff_cannot_get_webhook(self, privilege, webhooks, projects, users): proj_webhooks = [w for w in webhooks if w["type"] == "project"] username, wid = next( @@ -631,7 +631,7 @@ def test_admin_can_get_webhooks_for_project_in_org(self, webhooks): assert response.status_code == HTTPStatus.OK assert DeepDiff(expected_response, response.json()["results"], ignore_order=True) == {} - @pytest.mark.parametrize("privilege", ["user", "business"]) + @pytest.mark.parametrize("privilege", ["user"]) def test_user_cannot_get_webhook_list_for_project( self, privilege, find_users, webhooks, projects ): @@ -654,7 +654,7 @@ def test_user_cannot_get_webhook_list_for_project( assert response.status_code == HTTPStatus.OK assert DeepDiff([], response.json()["results"], ignore_order=True) == {} - @pytest.mark.parametrize("privilege", ["user", "business"]) + @pytest.mark.parametrize("privilege", ["user"]) def test_user_can_get_webhook_list_for_project(self, privilege, find_users, webhooks, projects): username, pid = next( ( @@ -824,7 +824,7 @@ def test_cannot_update_with_nonexistent_contenttype(self): response = patch_method("admin2", f"webhooks/{self.WID}", 
patch_data) assert response.status_code == HTTPStatus.BAD_REQUEST - @pytest.mark.parametrize("privilege", ["user", "business"]) + @pytest.mark.parametrize("privilege", ["user"]) def test_sandbox_user_can_update_webhook(self, privilege, find_users, webhooks): username, webhook = next( ( @@ -852,7 +852,7 @@ def test_sandbox_user_can_update_webhook(self, privilege, find_users, webhooks): == {} ) - @pytest.mark.parametrize("privilege", ["worker", "user", "business"]) + @pytest.mark.parametrize("privilege", ["worker", "user"]) def test_sandbox_user_cannot_update_webhook(self, privilege, find_users, webhooks): username, webhook = next( ( @@ -1029,9 +1029,7 @@ def test_member_can_update_project_webhook_in_org( @pytest.mark.usefixtures("restore_db_per_function") class TestDeleteWebhooks: - @pytest.mark.parametrize( - "privilege, allow", [("user", False), ("business", False), ("admin", True)] - ) + @pytest.mark.parametrize("privilege, allow", [("user", False), ("admin", True)]) def test_user_can_delete_project_webhook( self, privilege, allow, find_users, webhooks, projects ): @@ -1101,7 +1099,7 @@ def test_admin_can_delete_org_webhook(self, find_users, webhooks, is_org_member) response = get_method(username, f"webhooks/{webhook_id}") assert response.status_code == HTTPStatus.NOT_FOUND - @pytest.mark.parametrize("privilege", ["user", "business"]) + @pytest.mark.parametrize("privilege", ["user"]) def test_project_owner_can_delete_project_webhook( self, privilege, find_users, webhooks, projects ): @@ -1123,7 +1121,7 @@ def test_project_owner_can_delete_project_webhook( response = get_method(username, f"webhooks/{webhook_id}") assert response.status_code == HTTPStatus.NOT_FOUND - @pytest.mark.parametrize("privilege", ["user", "business"]) + @pytest.mark.parametrize("privilege", ["user"]) def test_webhook_owner_can_delete_project_webhook( self, privilege, find_users, webhooks, projects ): diff --git a/tests/python/rest_api/utils.py b/tests/python/rest_api/utils.py index 0552efc737f4..8d5032998358 100644 --- a/tests/python/rest_api/utils.py +++ b/tests/python/rest_api/utils.py @@ -4,10 +4,11 @@ import json from abc import ABCMeta, abstractmethod +from collections.abc import Iterator, Sequence from copy import deepcopy from http import HTTPStatus from time import sleep -from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Optional, Union import requests from cvat_sdk.api_client import apis, models @@ -43,7 +44,7 @@ def initialize_export(endpoint: Endpoint, *, expect_forbidden: bool = False, **k def wait_and_download_v1( endpoint: Endpoint, *, - max_retries: int = 30, + max_retries: int = 50, interval: float = 0.1, download_result: bool = True, **kwargs, @@ -74,7 +75,7 @@ def wait_and_download_v1( def export_v1( endpoint: Endpoint, *, - max_retries: int = 30, + max_retries: int = 50, interval: float = 0.1, expect_forbidden: bool = False, wait_result: bool = True, @@ -114,7 +115,7 @@ def wait_and_download_v2( api_client: ApiClient, rq_id: str, *, - max_retries: int = 30, + max_retries: int = 50, interval: float = 0.1, download_result: bool = True, ) -> Optional[bytes]: @@ -152,7 +153,7 @@ def wait_and_download_v2( def export_v2( endpoint: Endpoint, *, - max_retries: int = 30, + max_retries: int = 50, interval: float = 0.1, expect_forbidden: bool = False, wait_result: bool = True, @@ -191,11 +192,11 @@ def export_v2( def export_dataset( api: Union[ProjectsApi, TasksApi, JobsApi], api_version: Union[ - int, Tuple[int] + int, tuple[int] ], 
# make this parameter required to be sure that all tests were updated and both API versions are used *, save_images: bool, - max_retries: int = 30, + max_retries: int = 50, interval: float = 0.1, format: str = "CVAT for images 1.1", # pylint: disable=redefined-builtin **kwargs, ) -> Optional[bytes]: @@ -251,21 +252,21 @@ def _get_endpoint_and_kwargs(version: int) -> Endpoint: # FUTURE-TODO: support username: optional, api_client: optional def export_project_dataset( - username: str, api_version: Union[int, Tuple[int]], *args, **kwargs + username: str, api_version: Union[int, tuple[int]], *args, **kwargs ) -> Optional[bytes]: with make_api_client(username) as api_client: return export_dataset(api_client.projects_api, api_version, *args, **kwargs) def export_task_dataset( - username: str, api_version: Union[int, Tuple[int]], *args, **kwargs + username: str, api_version: Union[int, tuple[int]], *args, **kwargs ) -> Optional[bytes]: with make_api_client(username) as api_client: return export_dataset(api_client.tasks_api, api_version, *args, **kwargs) def export_job_dataset( - username: str, api_version: Union[int, Tuple[int]], *args, **kwargs + username: str, api_version: Union[int, tuple[int]], *args, **kwargs ) -> Optional[bytes]: with make_api_client(username) as api_client: return export_dataset(api_client.jobs_api, api_version, *args, **kwargs) @@ -274,10 +275,10 @@ def export_job_dataset( def export_backup( api: Union[ProjectsApi, TasksApi], api_version: Union[ - int, Tuple[int] + int, tuple[int] ], # make this parameter required to be sure that all tests were updated and both API versions are used *, - max_retries: int = 30, + max_retries: int = 50, interval: float = 0.1, **kwargs, ) -> Optional[bytes]: @@ -309,14 +310,14 @@ def export_backup( def export_project_backup( - username: str, api_version: Union[int, Tuple[int]], *args, **kwargs + username: str, api_version: Union[int, tuple[int]], *args, **kwargs ) -> Optional[bytes]: with make_api_client(username) as api_client: return export_backup(api_client.projects_api, api_version, *args, **kwargs) def export_task_backup( - username: str, api_version: Union[int, Tuple[int]], *args, **kwargs + username: str, api_version: Union[int, tuple[int]], *args, **kwargs ) -> Optional[bytes]: with make_api_client(username) as api_client: return export_backup(api_client.tasks_api, api_version, *args, **kwargs) @@ -325,7 +326,7 @@ def export_task_backup( def import_resource( endpoint: Endpoint, *, - max_retries: int = 30, + max_retries: int = 50, interval: float = 0.1, expect_forbidden: bool = False, wait_result: bool = True, @@ -371,7 +372,7 @@ def import_resource( def import_backup( api: Union[ProjectsApi, TasksApi], *, - max_retries: int = 30, + max_retries: int = 50, interval: float = 0.1, **kwargs, ) -> None: @@ -379,12 +380,12 @@ def import_backup( return import_resource(endpoint, max_retries=max_retries, interval=interval, **kwargs) -def import_project_backup(username: str, data: Dict, **kwargs) -> None: +def import_project_backup(username: str, data: dict, **kwargs) -> None: with make_api_client(username) as api_client: return import_backup(api_client.projects_api, project_file_request=deepcopy(data), **kwargs) -def import_task_backup(username: str, data: Dict, **kwargs) -> None: +def import_task_backup(username: str, data: dict, **kwargs) -> None: with make_api_client(username) as api_client: return import_backup(api_client.tasks_api, task_file_request=deepcopy(data), **kwargs) @@ -395,20 +396,20 @@ def import_task_backup(username: str, data: Dict, **kwargs) -> 
None: class CollectionSimpleFilterTestBase(metaclass=ABCMeta): # These fields need to be defined in the subclass user: str - samples: List[Dict[str, Any]] - field_lookups: Dict[str, FieldPath] = None - cmp_ignore_keys: List[str] = ["updated_date"] + samples: list[dict[str, Any]] + field_lookups: dict[str, FieldPath] = None + cmp_ignore_keys: list[str] = ["updated_date"] @abstractmethod def _get_endpoint(self, api_client: ApiClient) -> Endpoint: ... - def _retrieve_collection(self, **kwargs) -> List: + def _retrieve_collection(self, **kwargs) -> list: kwargs["return_json"] = True with make_api_client(self.user) as api_client: return get_paginated_collection(self._get_endpoint(api_client), **kwargs) @classmethod - def _get_field(cls, d: Dict[str, Any], path: Union[str, FieldPath]) -> Optional[Any]: + def _get_field(cls, d: dict[str, Any], path: Union[str, FieldPath]) -> Optional[Any]: assert path for key in path: if isinstance(d, dict): @@ -428,7 +429,7 @@ def _map_field(self, name: str) -> FieldPath: @classmethod def _find_valid_field_value( - cls, samples: Iterator[Dict[str, Any]], field_path: FieldPath + cls, samples: Iterator[dict[str, Any]], field_path: FieldPath ) -> Any: value = None for sample in samples: @@ -439,7 +440,7 @@ def _find_valid_field_value( assert value, f"Failed to find a sample for the '{'.'.join(field_path)}' field" return value - def _get_field_samples(self, field: str) -> Tuple[Any, List[Dict[str, Any]]]: + def _get_field_samples(self, field: str) -> tuple[Any, list[dict[str, Any]]]: field_path = self._map_field(field) field_value = self._find_valid_field_value(self.samples, field_path) @@ -463,7 +464,7 @@ def _compare_results(self, gt_objects, received_objects): assert diff == {}, diff def _test_can_use_simple_filter_for_object_list( - self, field: str, field_values: Optional[List[Any]] = None + self, field: str, field_values: Optional[list[Any]] = None ): gt_objects = [] field_path = self._map_field(field) @@ -485,12 +486,12 @@ def _test_can_use_simple_filter_for_object_list( self._compare_results(gt_objects, received_items) -def get_attrs(obj: Any, attributes: Sequence[str]) -> Tuple[Any, ...]: +def get_attrs(obj: Any, attributes: Sequence[str]) -> tuple[Any, ...]: """Returns 1 or more object attributes as a tuple""" return (getattr(obj, attr) for attr in attributes) -def build_exclude_paths_expr(ignore_fields: Iterator[str]) -> List[str]: +def build_exclude_paths_expr(ignore_fields: Iterator[str]) -> list[str]: exclude_expr_parts = [] for key in ignore_fields: if "." 
in key: @@ -572,7 +573,7 @@ def create_task(username, spec, data, content_type="application/json", **kwargs) return task.id, response_.headers.get("X-Request-Id") -def compare_annotations(a, b): +def compare_annotations(a: dict, b: dict) -> dict: def _exclude_cb(obj, path): return path.endswith("['elements']") and not obj @@ -592,5 +593,15 @@ def _exclude_cb(obj, path): ) +DATUMARO_FORMAT_FOR_DIMENSION = { + "2d": "Datumaro 1.0", + "3d": "Datumaro 3D 1.0", +} + + def parse_frame_step(frame_filter: str) -> int: return int((frame_filter or "step=1").split("=")[1]) + + +def calc_end_frame(start_frame: int, stop_frame: int, frame_step: int) -> int: + return stop_frame - ((stop_frame - start_frame) % frame_step) + frame_step diff --git a/tests/python/sdk/fixtures.py b/tests/python/sdk/fixtures.py index fa1051141a33..f495c4ce8ef4 100644 --- a/tests/python/sdk/fixtures.py +++ b/tests/python/sdk/fixtures.py @@ -3,7 +3,6 @@ # SPDX-License-Identifier: MIT from pathlib import Path -from typing import Tuple from zipfile import ZipFile import pytest @@ -72,7 +71,7 @@ def fxt_coco_dataset(tmp_path: Path, fxt_image_file: Path, fxt_coco_file: Path): @pytest.fixture -def fxt_new_task(fxt_image_file: Path, fxt_login: Tuple[Client, str]): +def fxt_new_task(fxt_image_file: Path, fxt_login: tuple[Client, str]): client, _ = fxt_login task = client.tasks.create_from_data( spec={ @@ -87,7 +86,7 @@ def fxt_new_task(fxt_image_file: Path, fxt_login: Tuple[Client, str]): @pytest.fixture -def fxt_new_task_with_target_storage(fxt_image_file: Path, fxt_login: Tuple[Client, str]): +def fxt_new_task_with_target_storage(fxt_image_file: Path, fxt_login: tuple[Client, str]): client, _ = fxt_login task = client.tasks.create_from_data( spec={ diff --git a/tests/python/sdk/test_api_wrappers.py b/tests/python/sdk/test_api_wrappers.py index 84ec919c9ba2..f324637b78e9 100644 --- a/tests/python/sdk/test_api_wrappers.py +++ b/tests/python/sdk/test_api_wrappers.py @@ -2,6 +2,7 @@ # # SPDX-License-Identifier: MIT +import pickle from copy import deepcopy from cvat_sdk import models @@ -112,3 +113,12 @@ def test_models_do_not_return_internal_collections(): model_data2 = model.to_dict() assert DeepDiff(model_data1_original, model_data2) == {} + + +def test_models_are_pickleable(): + model = models.PatchedLabelRequest(id=5, name="person") + pickled_model = pickle.dumps(model) + unpickled_model = pickle.loads(pickled_model) + + assert unpickled_model.id == model.id + assert unpickled_model.name == model.name diff --git a/tests/python/sdk/test_auto_annotation.py b/tests/python/sdk/test_auto_annotation.py index e7ac8418b69a..9c41112edb0a 100644 --- a/tests/python/sdk/test_auto_annotation.py +++ b/tests/python/sdk/test_auto_annotation.py @@ -3,10 +3,10 @@ # SPDX-License-Identifier: MIT import io +import math from logging import Logger from pathlib import Path from types import SimpleNamespace as namespace -from typing import List, Tuple import cvat_sdk.auto_annotation as cvataa import PIL.Image @@ -27,9 +27,10 @@ @pytest.fixture(autouse=True) def _common_setup( tmp_path: Path, - fxt_login: Tuple[Client, str], - fxt_logger: Tuple[Logger, io.StringIO], + fxt_login: tuple[Client, str], + fxt_logger: tuple[Logger, io.StringIO], restore_redis_ondisk_per_function, + restore_redis_inmem_per_function, ): logger = fxt_logger[0] client = fxt_login[0] @@ -46,7 +47,7 @@ class TestTaskAutoAnnotation: def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], + fxt_login: tuple[Client, str], ): self.client = fxt_login[0] self.images = [ @@ -114,7 
+115,7 @@ def test_detection_rectangle(self): def detect( context: cvataa.DetectionFunctionContext, image: PIL.Image.Image - ) -> List[models.LabeledShapeRequest]: + ) -> list[models.LabeledShapeRequest]: assert context.frame_name in {"1.png", "2.png"} assert image.width == image.height == 333 return [ @@ -168,7 +169,7 @@ def test_detection_skeleton(self): ], ) - def detect(context, image: PIL.Image.Image) -> List[models.LabeledShapeRequest]: + def detect(context, image: PIL.Image.Image) -> list[models.LabeledShapeRequest]: assert image.width == image.height == 333 return [ cvataa.skeleton( @@ -241,7 +242,7 @@ def test_detection_without_clearing(self): ], ) - def detect(context, image: PIL.Image.Image) -> List[models.LabeledShapeRequest]: + def detect(context, image: PIL.Image.Image) -> list[models.LabeledShapeRequest]: return [ cvataa.rectangle( 123, # car @@ -270,6 +271,77 @@ def detect(context, image: PIL.Image.Image) -> List[models.LabeledShapeRequest]: assert shapes[i].points == [5, 6, 7, 8] assert shapes[i].rotation == 10 + def test_conf_threshold(self): + spec = cvataa.DetectionFunctionSpec(labels=[]) + + received_threshold = None + + def detect( + context: cvataa.DetectionFunctionContext, image: PIL.Image.Image + ) -> list[models.LabeledShapeRequest]: + nonlocal received_threshold + received_threshold = context.conf_threshold + return [] + + cvataa.annotate_task( + self.client, + self.task.id, + namespace(spec=spec, detect=detect), + conf_threshold=0.75, + ) + + assert received_threshold == 0.75 # python:S1244 NOSONAR + + cvataa.annotate_task( + self.client, + self.task.id, + namespace(spec=spec, detect=detect), + ) + + assert received_threshold is None + + for bad_threshold in [-0.1, 1.1]: + with pytest.raises(ValueError): + cvataa.annotate_task( + self.client, + self.task.id, + namespace(spec=spec, detect=detect), + conf_threshold=bad_threshold, + ) + + def test_conv_mask_to_poly(self): + spec = cvataa.DetectionFunctionSpec( + labels=[ + cvataa.label_spec("car", 123), + ], + ) + + received_cmtp = None + + def detect(context, image: PIL.Image.Image) -> list[models.LabeledShapeRequest]: + nonlocal received_cmtp + received_cmtp = context.conv_mask_to_poly + return [cvataa.mask(123, [1, 0, 0, 0, 0])] + + cvataa.annotate_task( + self.client, + self.task.id, + namespace(spec=spec, detect=detect), + conv_mask_to_poly=False, + ) + + assert received_cmtp is False + + with pytest.raises(cvataa.BadFunctionError, match=".*conv_mask_to_poly.*"): + cvataa.annotate_task( + self.client, + self.task.id, + namespace(spec=spec, detect=detect), + conv_mask_to_poly=True, + ) + + assert received_cmtp is True + def _test_bad_function_spec(self, spec: cvataa.DetectionFunctionSpec, exc_match: str) -> None: def detect(context, image): assert False @@ -570,14 +642,15 @@ def __init__(self, label_id: int) -> None: super().__init__() self._label_id = label_id - def forward(self, images: List[torch.Tensor]) -> List[dict]: + def forward(self, images: list[torch.Tensor]) -> list[dict]: assert isinstance(images, list) assert all(isinstance(t, torch.Tensor) for t in images) return [ { - "boxes": torch.tensor([[1, 2, 3, 4]]), - "labels": torch.tensor([self._label_id]), + "boxes": torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]), + "labels": torch.tensor([self._label_id, self._label_id]), + "scores": torch.tensor([0.75, 0.74]), } ] @@ -588,27 +661,83 @@ def fake_get_detection_model(name: str, weights, test_param): return FakeTorchvisionDetector(label_id=car_label_id) + class FakeTorchvisionInstanceSegmenter(nn.Module): + 
def __init__(self, label_id: int) -> None: + super().__init__() + self._label_id = label_id + + def forward(self, images: list[torch.Tensor]) -> list[dict]: + assert isinstance(images, list) + assert all(isinstance(t, torch.Tensor) for t in images) + + def make_box(im, a1, a2): + return [im.shape[2] * a1, im.shape[1] * a1, im.shape[2] * a2, im.shape[1] * a2] + + def make_mask(im, a1, a2): + # creates a rectangular mask with a hole + mask = torch.full((1, im.shape[1], im.shape[2]), 0.49) + mask[ + 0, + math.ceil(im.shape[1] * a1) : math.floor(im.shape[1] * a2), + math.ceil(im.shape[2] * a1) : math.floor(im.shape[2] * a2), + ] = 0.5 + mask[ + 0, + math.ceil(im.shape[1] * a1) + 3 : math.floor(im.shape[1] * a2) - 3, + math.ceil(im.shape[2] * a1) + 3 : math.floor(im.shape[2] * a2) - 3, + ] = 0.49 + return mask + + return [ + { + "labels": torch.tensor([self._label_id, self._label_id]), + "boxes": torch.tensor( + [ + make_box(im, 1 / 6, 1 / 3), + make_box(im, 2 / 3, 5 / 6), + ] + ), + "masks": torch.stack( + [ + make_mask(im, 1 / 6, 1 / 3), + make_mask(im, 2 / 3, 5 / 6), + ] + ), + "scores": torch.tensor([0.75, 0.74]), + } + for im in images + ] + + def fake_get_instance_segmentation_model(name: str, weights, test_param): + assert test_param == "expected_value" + + car_label_id = weights.meta["categories"].index("car") + + return FakeTorchvisionInstanceSegmenter(label_id=car_label_id) + class FakeTorchvisionKeypointDetector(nn.Module): - def __init__(self, label_id: int, keypoint_names: List[str]) -> None: + def __init__(self, label_id: int, keypoint_names: list[str]) -> None: super().__init__() self._label_id = label_id self._keypoint_names = keypoint_names - def forward(self, images: List[torch.Tensor]) -> List[dict]: + def forward(self, images: list[torch.Tensor]) -> list[dict]: assert isinstance(images, list) assert all(isinstance(t, torch.Tensor) for t in images) return [ { - "labels": torch.tensor([self._label_id]), + "labels": torch.tensor([self._label_id, self._label_id]), "keypoints": torch.tensor( [ [ [hash(name) % 100, 0, 1 if name.startswith("right_") else 0] for i, name in enumerate(self._keypoint_names) - ] + ], + [[0, 0, 1] for i, name in enumerate(self._keypoint_names)], ] ), + "scores": torch.tensor([0.75, 0.74]), } ] @@ -628,7 +757,7 @@ class TestAutoAnnotationFunctions: def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], + fxt_login: tuple[Client, str], ): self.client = fxt_login[0] self.image = generate_image_file("1.png", size=(100, 100)) @@ -673,6 +802,7 @@ def test_torchvision_detection(self, monkeypatch: pytest.MonkeyPatch): self.task.id, td.create("fasterrcnn_resnet50_fpn_v2", "COCO_V1", test_param="expected_value"), allow_unmatched_labels=True, + conf_threshold=0.75, ) annotations = self.task.get_annotations() @@ -682,6 +812,54 @@ def test_torchvision_detection(self, monkeypatch: pytest.MonkeyPatch): assert annotations.shapes[0].type.value == "rectangle" assert annotations.shapes[0].points == [1, 2, 3, 4] + def test_torchvision_instance_segmentation(self, monkeypatch: pytest.MonkeyPatch): + monkeypatch.setattr(torchvision_models, "get_model", fake_get_instance_segmentation_model) + + import cvat_sdk.auto_annotation.functions.torchvision_instance_segmentation as tis + from cvat_sdk.masks import encode_mask + + cvataa.annotate_task( + self.client, + self.task.id, + tis.create("maskrcnn_resnet50_fpn_v2", "COCO_V1", test_param="expected_value"), + allow_unmatched_labels=True, + conf_threshold=0.75, + ) + + annotations = self.task.get_annotations() + + assert 
len(annotations.shapes) == 1 + assert self.task_labels_by_id[annotations.shapes[0].label_id].name == "car" + + expected_bitmap = torch.zeros((100, 100), dtype=torch.bool) + expected_bitmap[17:33, 17:33] = True + expected_bitmap[20:30, 20:30] = False + + assert annotations.shapes[0].type.value == "mask" + assert annotations.shapes[0].points == encode_mask(expected_bitmap, [16, 16, 34, 34]) + + cvataa.annotate_task( + self.client, + self.task.id, + tis.create("maskrcnn_resnet50_fpn_v2", "COCO_V1", test_param="expected_value"), + allow_unmatched_labels=True, + conf_threshold=0.75, + conv_mask_to_poly=True, + clear_existing=True, + ) + + annotations = self.task.get_annotations() + + assert len(annotations.shapes) == 1 + assert self.task_labels_by_id[annotations.shapes[0].label_id].name == "car" + assert annotations.shapes[0].type.value == "polygon" + + # We shouldn't rely on the exact result of polygon conversion, + # since it depends on a 3rd-party library. Instead, we'll just + # check that all points are within the expected area. + for x, y in zip(*[iter(annotations.shapes[0].points)] * 2): + assert expected_bitmap[round(y), round(x)] + def test_torchvision_keypoint_detection(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(torchvision_models, "get_model", fake_get_keypoint_detection_model) @@ -692,6 +870,7 @@ def test_torchvision_keypoint_detection(self, monkeypatch: pytest.MonkeyPatch): self.task.id, tkd.create("keypointrcnn_resnet50_fpn", "COCO_V1", test_param="expected_value"), allow_unmatched_labels=True, + conf_threshold=0.75, ) annotations = self.task.get_annotations() diff --git a/tests/python/sdk/test_client.py b/tests/python/sdk/test_client.py index 7554e8f5f2d8..38609176222c 100644 --- a/tests/python/sdk/test_client.py +++ b/tests/python/sdk/test_client.py @@ -5,7 +5,6 @@ import io from contextlib import ExitStack from logging import Logger -from typing import List, Tuple import packaging.version as pv import pytest @@ -22,7 +21,7 @@ class TestClientUsecases: def setup( self, restore_db_per_function, # force fixture call order to allow DB setup - fxt_logger: Tuple[Logger, io.StringIO], + fxt_logger: tuple[Logger, io.StringIO], fxt_client: Client, fxt_stdout: io.StringIO, admin_user: str, @@ -95,7 +94,7 @@ def test_can_reject_invalid_server_schema(): @pytest.mark.parametrize("raise_exception", (True, False)) def test_can_warn_on_mismatching_server_version( - fxt_logger: Tuple[Logger, io.StringIO], monkeypatch, raise_exception: bool + fxt_logger: tuple[Logger, io.StringIO], monkeypatch, raise_exception: bool ): logger, logger_stream = fxt_logger @@ -118,7 +117,7 @@ def mocked_version(_): @pytest.mark.parametrize("do_check", (True, False)) def test_can_check_server_version_in_ctor( - fxt_logger: Tuple[Logger, io.StringIO], monkeypatch, do_check: bool + fxt_logger: tuple[Logger, io.StringIO], monkeypatch, do_check: bool ): logger, logger_stream = fxt_logger @@ -141,7 +140,7 @@ def mocked_version(_): ) == do_check -def test_can_check_server_version_in_method(fxt_logger: Tuple[Logger, io.StringIO], monkeypatch): +def test_can_check_server_version_in_method(fxt_logger: tuple[Logger, io.StringIO], monkeypatch): logger, logger_stream = fxt_logger def mocked_version(_): @@ -183,10 +182,10 @@ def mocked_version(_): ], ) def test_can_check_server_version_compatibility( - fxt_logger: Tuple[Logger, io.StringIO], + fxt_logger: tuple[Logger, io.StringIO], monkeypatch: pytest.MonkeyPatch, server_version: str, - supported_versions: List[str], + supported_versions: list[str], 
expect_supported: bool, ): logger, _ = fxt_logger diff --git a/tests/python/sdk/test_datasets.py b/tests/python/sdk/test_datasets.py index 542ad9a1e80c..7f13e75ea92f 100644 --- a/tests/python/sdk/test_datasets.py +++ b/tests/python/sdk/test_datasets.py @@ -5,7 +5,6 @@ import io from logging import Logger from pathlib import Path -from typing import Tuple import cvat_sdk.datasets as cvatds import PIL.Image @@ -21,9 +20,10 @@ @pytest.fixture(autouse=True) def _common_setup( tmp_path: Path, - fxt_login: Tuple[Client, str], - fxt_logger: Tuple[Logger, io.StringIO], + fxt_login: tuple[Client, str], + fxt_logger: tuple[Logger, io.StringIO], restore_redis_ondisk_per_function, + restore_redis_inmem_per_function, ): logger = fxt_logger[0] client = fxt_login[0] @@ -40,7 +40,7 @@ class TestTaskDataset: def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], + fxt_login: tuple[Client, str], ): self.client = fxt_login[0] self.images = generate_image_files(10) diff --git a/tests/python/sdk/test_issues_comments.py b/tests/python/sdk/test_issues_comments.py index 12047c75a1f0..f90b663fbefe 100644 --- a/tests/python/sdk/test_issues_comments.py +++ b/tests/python/sdk/test_issues_comments.py @@ -5,7 +5,6 @@ import io from logging import Logger from pathlib import Path -from typing import Tuple import pytest from cvat_sdk import Client @@ -18,8 +17,8 @@ class TestIssuesUsecases: def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], - fxt_logger: Tuple[Logger, io.StringIO], + fxt_login: tuple[Client, str], + fxt_logger: tuple[Logger, io.StringIO], fxt_stdout: io.StringIO, ): self.tmp_path = tmp_path @@ -139,8 +138,8 @@ class TestCommentsUsecases: def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], - fxt_logger: Tuple[Logger, io.StringIO], + fxt_login: tuple[Client, str], + fxt_logger: tuple[Logger, io.StringIO], fxt_stdout: io.StringIO, ): self.tmp_path = tmp_path diff --git a/tests/python/sdk/test_jobs.py b/tests/python/sdk/test_jobs.py index 3202e2957ff0..3d49978d5da5 100644 --- a/tests/python/sdk/test_jobs.py +++ b/tests/python/sdk/test_jobs.py @@ -5,7 +5,7 @@ import io from logging import Logger from pathlib import Path -from typing import Optional, Tuple +from typing import Optional import pytest from cvat_sdk import Client @@ -26,8 +26,8 @@ class TestJobUsecases(TestDatasetExport): def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], - fxt_logger: Tuple[Logger, io.StringIO], + fxt_login: tuple[Client, str], + fxt_logger: tuple[Logger, io.StringIO], fxt_stdout: io.StringIO, restore_redis_ondisk_per_function, ): diff --git a/tests/python/sdk/test_masks.py b/tests/python/sdk/test_masks.py new file mode 100644 index 000000000000..46e8b9f214cc --- /dev/null +++ b/tests/python/sdk/test_masks.py @@ -0,0 +1,71 @@ +# Copyright (C) 2024 CVAT.ai Corporation +# +# SPDX-License-Identifier: MIT + +import pytest + +try: + import numpy as np + from cvat_sdk.masks import encode_mask + +except ModuleNotFoundError as e: + if e.name.split(".")[0] != "numpy": + raise + + encode_mask = None + + +@pytest.mark.skipif(encode_mask is None, reason="NumPy is not installed") +class TestMasks: + def test_encode_mask(self): + bitmap = np.array( + [ + np.fromstring("0 0 1 1 1 0", sep=" "), + np.fromstring("0 1 1 0 0 0", sep=" "), + ], + dtype=np.bool_, + ) + bbox = [2.9, 0.9, 4.1, 1.1] # will get rounded to [2, 0, 5, 2] + + # There's slightly different logic for when the cropped mask starts with + # 0 and 1, so test both. 
+ # This one starts with 1: + # 111 + # 100 + + assert encode_mask(bitmap, bbox) == [0, 4, 2, 2, 0, 4, 1] + + bbox = [1, 0, 5, 2] + + # This one starts with 0: + # 0111 + # 1100 + + assert encode_mask(bitmap, bbox) == [1, 5, 2, 1, 0, 4, 1] + + # Edge case: full image + bbox = [0, 0, 6, 2] + assert encode_mask(bitmap, bbox) == [2, 3, 2, 2, 3, 0, 0, 5, 1] + + def test_encode_mask_invalid_dim(self): + with pytest.raises(ValueError, match="bitmap must have 2 dimensions"): + encode_mask([True], [0, 0, 1, 1]) + + def test_encode_mask_invalid_dtype(self): + with pytest.raises(ValueError, match="bitmap must have boolean items"): + encode_mask([[1]], [0, 0, 1, 1]) + + @pytest.mark.parametrize( + "bbox", + [ + [-0.1, 0, 1, 1], + [0, -0.1, 1, 1], + [0, 0, 1.1, 1], + [0, 0, 1, 1.1], + [1, 0, 0, 1], + [0, 1, 1, 0], + ], + ) + def test_encode_mask_invalid_bbox(self, bbox): + with pytest.raises(ValueError, match="bbox has invalid coordinates"): + encode_mask([[True]], bbox) diff --git a/tests/python/sdk/test_organizations.py b/tests/python/sdk/test_organizations.py index 84198c73b24f..54f9798cc849 100644 --- a/tests/python/sdk/test_organizations.py +++ b/tests/python/sdk/test_organizations.py @@ -4,7 +4,6 @@ import io from logging import Logger -from typing import Tuple import pytest from cvat_sdk import Client, models @@ -16,8 +15,8 @@ class TestOrganizationUsecases: @pytest.fixture(autouse=True) def setup( self, - fxt_login: Tuple[Client, str], - fxt_logger: Tuple[Logger, io.StringIO], + fxt_login: tuple[Client, str], + fxt_logger: tuple[Logger, io.StringIO], fxt_stdout: io.StringIO, ): logger, self.logger_stream = fxt_logger diff --git a/tests/python/sdk/test_projects.py b/tests/python/sdk/test_projects.py index b03df660d87a..db0b5f265586 100644 --- a/tests/python/sdk/test_projects.py +++ b/tests/python/sdk/test_projects.py @@ -5,7 +5,7 @@ import io from logging import Logger from pathlib import Path -from typing import Optional, Tuple +from typing import Optional import pytest from cvat_sdk import Client, models @@ -29,8 +29,8 @@ class TestProjectUsecases(TestDatasetExport): def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], - fxt_logger: Tuple[Logger, io.StringIO], + fxt_login: tuple[Client, str], + fxt_logger: tuple[Logger, io.StringIO], fxt_stdout: io.StringIO, restore_redis_ondisk_per_function, ): diff --git a/tests/python/sdk/test_pytorch.py b/tests/python/sdk/test_pytorch.py index 2bcbd122abff..1427a070d46b 100644 --- a/tests/python/sdk/test_pytorch.py +++ b/tests/python/sdk/test_pytorch.py @@ -7,7 +7,6 @@ import os from logging import Logger from pathlib import Path -from typing import Tuple import pytest from cvat_sdk import Client, models @@ -34,9 +33,10 @@ @pytest.fixture(autouse=True) def _common_setup( tmp_path: Path, - fxt_login: Tuple[Client, str], - fxt_logger: Tuple[Logger, io.StringIO], + fxt_login: tuple[Client, str], + fxt_logger: tuple[Logger, io.StringIO], restore_redis_ondisk_per_function, + restore_redis_inmem_per_function, ): logger = fxt_logger[0] client = fxt_login[0] @@ -54,7 +54,7 @@ class TestTaskVisionDataset: def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], + fxt_login: tuple[Client, str], ): self.client = fxt_login[0] self.images = generate_image_files(10) @@ -298,7 +298,7 @@ class TestProjectVisionDataset: def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], + fxt_login: tuple[Client, str], ): self.client = fxt_login[0] diff --git a/tests/python/sdk/test_tasks.py b/tests/python/sdk/test_tasks.py index 
54e0823d3311..0181d5c74d3b 100644 --- a/tests/python/sdk/test_tasks.py +++ b/tests/python/sdk/test_tasks.py @@ -7,7 +7,7 @@ import zipfile from logging import Logger from pathlib import Path -from typing import Optional, Tuple +from typing import Optional import pytest from cvat_sdk import Client, models @@ -30,8 +30,8 @@ class TestTaskUsecases(TestDatasetExport): def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], - fxt_logger: Tuple[Logger, io.StringIO], + fxt_login: tuple[Client, str], + fxt_logger: tuple[Logger, io.StringIO], fxt_stdout: io.StringIO, restore_redis_ondisk_per_function, ): diff --git a/tests/python/sdk/test_users.py b/tests/python/sdk/test_users.py index 94c61adac3db..fbdc675e5dd2 100644 --- a/tests/python/sdk/test_users.py +++ b/tests/python/sdk/test_users.py @@ -5,7 +5,6 @@ import io from logging import Logger from pathlib import Path -from typing import Tuple import pytest from cvat_sdk import Client, models @@ -17,8 +16,8 @@ class TestUserUsecases: def setup( self, tmp_path: Path, - fxt_login: Tuple[Client, str], - fxt_logger: Tuple[Logger, io.StringIO], + fxt_login: tuple[Client, str], + fxt_logger: tuple[Logger, io.StringIO], fxt_stdout: io.StringIO, ): self.tmp_path = tmp_path diff --git a/tests/python/sdk/util.py b/tests/python/sdk/util.py index 1686330ad9f1..4384a88d418d 100644 --- a/tests/python/sdk/util.py +++ b/tests/python/sdk/util.py @@ -3,8 +3,8 @@ # SPDX-License-Identifier: MIT import textwrap +from collections.abc import Container from pathlib import Path -from typing import Container, Tuple from urllib.parse import urlparse import pytest @@ -16,7 +16,7 @@ def make_pbar(file, **kwargs): return DeferredTqdmProgressReporter({"file": file, "mininterval": 0, **kwargs}) -def generate_coco_json(filename: Path, img_info: Tuple[Path, int, int]): +def generate_coco_json(filename: Path, img_info: tuple[Path, int, int]): image_filename, image_width, image_height = img_info content = generate_coco_anno( diff --git a/tests/python/shared/assets/cloudstorages.json b/tests/python/shared/assets/cloudstorages.json index 4cda853ec930..8d8d92009aad 100644 --- a/tests/python/shared/assets/cloudstorages.json +++ b/tests/python/shared/assets/cloudstorages.json @@ -36,11 +36,11 @@ ], "organization": 2, "owner": { - "first_name": "Business", + "first_name": "User", "id": 11, - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" }, "provider_type": "AWS_S3_BUCKET", "resource": "private", diff --git a/tests/python/shared/assets/comments.json b/tests/python/shared/assets/comments.json index f1f7457eae75..4681af9bd7dc 100644 --- a/tests/python/shared/assets/comments.json +++ b/tests/python/shared/assets/comments.json @@ -37,11 +37,11 @@ "issue": 3, "message": "Another one issue", "owner": { - "first_name": "Business", + "first_name": "User", "id": 11, - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" }, "updated_date": "2022-03-16T11:08:18.370000Z" }, @@ -51,11 +51,11 @@ "issue": 2, "message": "Something should be here", "owner": { - "first_name": "Business", + "first_name": "User", "id": 11, - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" }, "updated_date": "2022-03-16T11:07:22.173000Z" }, diff --git a/tests/python/shared/assets/cvat_db/data.json b/tests/python/shared/assets/cvat_db/data.json 
index 8a836d7d6958..5b30d421cb5a 100644 --- a/tests/python/shared/assets/cvat_db/data.json +++ b/tests/python/shared/assets/cvat_db/data.json @@ -10,14 +10,6 @@ { "model": "auth.group", "pk": 2, - "fields": { - "name": "business", - "permissions": [] - } -}, -{ - "model": "auth.group", - "pk": 3, "fields": { "name": "user", "permissions": [] @@ -25,7 +17,7 @@ }, { "model": "auth.group", - "pk": 4, + "pk": 3, "fields": { "name": "worker", "permissions": [] @@ -236,16 +228,16 @@ "password": "md5$6TyZJsUJ2hAbICwZHKp4p0$961841748b31d28bcaf3094e549d2bd5", "last_login": "2022-09-28T12:17:51.373Z", "is_superuser": false, - "username": "business1", - "first_name": "Business", - "last_name": "First", - "email": "business1@cvat.org", + "username": "user7", + "first_name": "User", + "last_name": "Seventh", + "email": "user7@cvat.org", "is_staff": false, "is_active": true, "date_joined": "2021-12-14T18:33:06Z", "groups": [ [ - "business" + "user" ] ], "user_permissions": [] @@ -258,16 +250,16 @@ "password": "md5$oLNLFFMdjViRqnAw1th3Zl$d816d16307053866451da43fb4443b66", "last_login": "2022-03-17T07:22:55.930Z", "is_superuser": false, - "username": "business2", - "first_name": "Business", - "last_name": "Second", - "email": "business2@cvat.org", + "username": "user8", + "first_name": "User", + "last_name": "Eighth", + "email": "user8@cvat.org", "is_staff": false, "is_active": true, "date_joined": "2021-12-14T18:34:01Z", "groups": [ [ - "business" + "user" ] ], "user_permissions": [] @@ -280,16 +272,16 @@ "password": "md5$7ETBhORLrHl45WPL9CkxnN$af77496152b60ffc73ef877c99807385", "last_login": null, "is_superuser": false, - "username": "business3", - "first_name": "Business", - "last_name": "Third", - "email": "business3@cvat.org", + "username": "user9", + "first_name": "User", + "last_name": "Nineth", + "email": "user9@cvat.org", "is_staff": false, "is_active": true, "date_joined": "2021-12-14T18:34:34Z", "groups": [ [ - "business" + "user" ] ], "user_permissions": [] @@ -302,16 +294,16 @@ "password": "md5$9huaZ72ncQGfmxUqU3Hwnz$6b010216eea87409f0aca7126bd80bbd", "last_login": null, "is_superuser": false, - "username": "business4", - "first_name": "Business", - "last_name": "Fourth", - "email": "business4@cvat.org", + "username": "user10", + "first_name": "User", + "last_name": "Tenth", + "email": "user10@cvat.org", "is_staff": false, "is_active": true, "date_joined": "2021-12-14T18:35:15Z", "groups": [ [ - "business" + "user" ] ], "user_permissions": [] @@ -734,7 +726,7 @@ "pk": "53da3ff9e514d84b56b5170059ff0f595c34157b", "fields": { "user": [ - "business2" + "user8" ], "created": "2022-03-17T07:22:55.921Z" } @@ -754,7 +746,7 @@ "pk": "c051fe19df24a0ac4c6bec5e635034271c9549dc", "fields": { "user": [ - "business1" + "user7" ], "created": "2023-05-01T08:42:48.127Z" } @@ -833,7 +825,7 @@ "email": "org2@cvat.org" }, "owner": [ - "business1" + "user7" ] } }, @@ -881,7 +873,7 @@ "pk": 4, "fields": { "user": [ - "business1" + "user7" ], "organization": 1, "is_active": true, @@ -894,7 +886,7 @@ "pk": 5, "fields": { "user": [ - "business1" + "user7" ], "organization": 2, "is_active": true, @@ -907,7 +899,7 @@ "pk": 6, "fields": { "user": [ - "business2" + "user8" ], "organization": 2, "is_active": true, @@ -1024,7 +1016,7 @@ "pk": 15, "fields": { "user": [ - "business2" + "user8" ], "organization": 1, "is_active": true, @@ -1039,7 +1031,7 @@ "created_date": "2022-01-19T13:54:42.005Z", "sent_date": "2022-01-19T13:54:42.005Z", "owner": [ - "business1" + "user7" ], "membership": 10 } @@ -1051,7 +1043,7 @@ 
"created_date": "2021-12-14T19:54:46.172Z", "sent_date": "2021-12-14T19:54:46.172Z", "owner": [ - "business1" + "user7" ], "membership": 7 } @@ -1063,7 +1055,7 @@ "created_date": "2022-01-19T13:54:42.015Z", "sent_date": "2022-01-19T13:54:42.015Z", "owner": [ - "business1" + "user7" ], "membership": 11 } @@ -1099,7 +1091,7 @@ "created_date": "2021-12-14T19:54:33.591Z", "sent_date": "2021-12-14T19:54:33.591Z", "owner": [ - "business1" + "user7" ], "membership": 6 } @@ -1147,7 +1139,7 @@ "created_date": "2021-12-14T19:55:13.745Z", "sent_date": "2021-12-14T19:55:13.745Z", "owner": [ - "business1" + "user7" ], "membership": 9 } @@ -1171,7 +1163,7 @@ "created_date": "2021-12-14T19:54:56.431Z", "sent_date": "2021-12-14T19:54:56.431Z", "owner": [ - "business1" + "user7" ], "membership": 8 } @@ -3896,7 +3888,7 @@ "updated_date": "2022-11-03T13:57:25.895Z", "name": "project1", "owner": [ - "business1" + "user7" ], "assignee": [ "user6" @@ -3917,7 +3909,7 @@ "updated_date": "2022-06-30T08:56:45.601Z", "name": "project2", "owner": [ - "business1" + "user7" ], "assignee": [ "user2" @@ -4000,7 +3992,7 @@ "user1" ], "assignee": [ - "business4" + "user10" ], "assignee_updated_date": null, "bug_tracker": "", @@ -4179,7 +4171,7 @@ "admin1" ], "assignee": [ - "business1" + "user7" ], "assignee_updated_date": "2024-09-23T08:09:45.461Z", "bug_tracker": "", @@ -4281,7 +4273,7 @@ "name": "task_2_org2", "mode": "annotation", "owner": [ - "business2" + "user8" ], "assignee": [ "worker2" @@ -4337,7 +4329,7 @@ "name": "task1_in_project1", "mode": "annotation", "owner": [ - "business1" + "user7" ], "assignee": [ "admin1" @@ -4365,7 +4357,7 @@ "name": "task1_in_project2", "mode": "annotation", "owner": [ - "business1" + "user7" ], "assignee": [ "user5" @@ -4738,7 +4730,7 @@ "user3" ], "assignee": [ - "business1" + "user7" ], "assignee_updated_date": "2024-09-23T10:51:45.525Z", "bug_tracker": "", @@ -6180,54 +6172,6 @@ "frames": "[]" } }, -{ - "model": "engine.segment", - "pk": 38, - "fields": { - "task": 29, - "start_frame": 0, - "stop_frame": 7, - "chunks_updated_date": "2024-10-02T08:13:16.623Z", - "type": "range", - "frames": "[]" - } -}, -{ - "model": "engine.segment", - "pk": 39, - "fields": { - "task": 29, - "start_frame": 8, - "stop_frame": 15, - "chunks_updated_date": "2024-10-02T08:13:16.623Z", - "type": "range", - "frames": "[]" - } -}, -{ - "model": "engine.segment", - "pk": 40, - "fields": { - "task": 29, - "start_frame": 16, - "stop_frame": 22, - "chunks_updated_date": "2024-10-02T08:13:16.623Z", - "type": "range", - "frames": "[]" - } -}, -{ - "model": "engine.segment", - "pk": 41, - "fields": { - "task": 29, - "start_frame": 23, - "stop_frame": 28, - "chunks_updated_date": "2024-10-02T08:13:16.623Z", - "type": "range", - "frames": "[]" - } -}, { "model": "engine.job", "pk": 2, @@ -12710,7 +12654,8 @@ "user": [ "admin1" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": true } }, { @@ -12720,7 +12665,8 @@ "user": [ "user1" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12730,7 +12676,8 @@ "user": [ "user2" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12740,7 +12687,8 @@ "user": [ "user3" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12750,7 +12698,8 @@ "user": [ "user4" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": true } }, { @@ -12760,7 +12709,8 @@ "user": [ "worker1" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12770,7 +12720,8 @@ "user": [ 
"worker2" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12780,7 +12731,8 @@ "user": [ "worker3" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12790,7 +12742,8 @@ "user": [ "worker4" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": true } }, { @@ -12798,9 +12751,10 @@ "pk": 10, "fields": { "user": [ - "business1" + "user7" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12808,9 +12762,10 @@ "pk": 11, "fields": { "user": [ - "business2" + "user8" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12818,9 +12773,10 @@ "pk": 12, "fields": { "user": [ - "business3" + "user9" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12828,9 +12784,10 @@ "pk": 13, "fields": { "user": [ - "business4" + "user10" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12840,7 +12797,8 @@ "user": [ "dummy1" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12850,7 +12808,8 @@ "user": [ "dummy2" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12860,7 +12819,8 @@ "user": [ "dummy3" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12870,7 +12830,8 @@ "user": [ "dummy4" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12880,7 +12841,8 @@ "user": [ "admin2" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": true } }, { @@ -12890,7 +12852,8 @@ "user": [ "user5" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12900,7 +12863,8 @@ "user": [ "user6" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12910,7 +12874,8 @@ "user": [ "lonely_user" ], - "rating": 0.0 + "rating": 0.0, + "has_analytics_access": false } }, { @@ -12939,7 +12904,7 @@ "position": "98.48046875, 696.72265625, 326.1220703125, 841.5859375", "job": 9, "owner": [ - "business2" + "user8" ], "assignee": null, "resolved": false @@ -12955,7 +12920,7 @@ "position": "108.1845703125, 235.0, 720.0087890625, 703.3505859375", "job": 16, "owner": [ - "business2" + "user8" ], "assignee": null, "resolved": false @@ -13029,7 +12994,7 @@ "updated_date": "2022-03-16T11:07:22.173Z", "issue": 2, "owner": [ - "business2" + "user8" ], "message": "Something should be here" } @@ -13042,7 +13007,7 @@ "updated_date": "2022-03-16T11:08:18.370Z", "issue": 3, "owner": [ - "business2" + "user8" ], "message": "Another one issue" } @@ -13126,7 +13091,7 @@ "resource": "private", "display_name": "Bucket 2", "owner": [ - "business2" + "user8" ], "credentials": "minio_access_key minio_secret_key", "credentials_type": "KEY_SECRET_KEY_PAIR", @@ -13590,7 +13555,7 @@ "is_active": true, "enable_ssl": true, "owner": [ - "business1" + "user7" ], "project": 1, "organization": null @@ -13709,9 +13674,9 @@ "user": { "id": 11, "url": "http://localhost:8080/api/users/11", - "username": "business2", - "last_name": "Second", - "first_name": "Business" + "username": "user8", + "last_name": "Eighth", + "first_name": "User" }, "owner": { "id": 2, @@ -13798,9 +13763,9 @@ "user": { "id": 11, "url": "http://localhost:8080/api/users/11", - "username": "business2", - "last_name": "Second", - "first_name": "Business" + "username": "user8", + "last_name": "Eighth", + "first_name": "User" }, "is_active": true, "invitation": "q8GWTPiR1Vz9DDO6MQo1B6pUBzW9GjDb6AUQPziAV62jD7OpCLZji0GS66C48wRX", @@ -18199,6 +18164,7 @@ "oks_sigma": 0.09, 
"line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18207,6 +18173,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18221,6 +18188,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18229,6 +18197,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18243,6 +18212,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18251,6 +18221,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18265,6 +18236,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18273,6 +18245,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18287,6 +18260,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18295,6 +18269,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18309,6 +18284,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18317,6 +18293,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18331,6 +18308,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18339,6 +18317,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18353,6 +18332,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18361,6 +18341,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": 
true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18375,6 +18356,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18383,6 +18365,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18397,6 +18380,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18405,6 +18389,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18419,6 +18404,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18427,6 +18413,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18441,6 +18428,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18449,6 +18437,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18463,6 +18452,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18471,6 +18461,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18485,6 +18476,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18493,6 +18485,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18507,6 +18500,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18515,6 +18509,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18529,6 +18524,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + 
"point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18537,6 +18533,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18551,6 +18548,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18559,6 +18557,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18573,6 +18572,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18581,6 +18581,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18595,6 +18596,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18603,6 +18605,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18617,6 +18620,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18625,6 +18629,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18639,6 +18644,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18647,6 +18653,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18661,6 +18668,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18669,6 +18677,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18683,6 +18692,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18691,6 +18701,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + 
"match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -18705,6 +18716,7 @@ "oks_sigma": 0.09, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "point_size_base": "group_bbox_size", "compare_line_orientation": true, "line_orientation_threshold": 0.1, "compare_groups": true, @@ -18713,6 +18725,7 @@ "object_visibility_threshold": 0.05, "panoptic_comparison": true, "compare_attributes": true, + "match_empty_frames": false, "target_metric": "accuracy", "target_metric_threshold": 0.7, "max_validations_per_job": 0 @@ -19037,7 +19050,7 @@ "user" ], "object_id": "10", - "object_repr": "business1", + "object_repr": "user7", "action_flag": 1, "change_message": "[{\"added\": {}}]" } @@ -19055,7 +19068,7 @@ "user" ], "object_id": "10", - "object_repr": "business1", + "object_repr": "user7", "action_flag": 2, "change_message": "[{\"changed\": {\"fields\": [\"First name\", \"Last name\"]}}]" } @@ -19073,7 +19086,7 @@ "user" ], "object_id": "10", - "object_repr": "business1", + "object_repr": "user7", "action_flag": 2, "change_message": "[{\"changed\": {\"fields\": [\"Last name\", \"Email address\", \"Groups\"]}}]" } @@ -19091,7 +19104,7 @@ "user" ], "object_id": "11", - "object_repr": "business2", + "object_repr": "user8", "action_flag": 1, "change_message": "[{\"added\": {}}]" } @@ -19109,7 +19122,7 @@ "user" ], "object_id": "11", - "object_repr": "business2", + "object_repr": "user8", "action_flag": 2, "change_message": "[{\"changed\": {\"fields\": [\"First name\", \"Last name\", \"Email address\", \"Groups\"]}}]" } @@ -19127,7 +19140,7 @@ "user" ], "object_id": "12", - "object_repr": "business3", + "object_repr": "user9", "action_flag": 1, "change_message": "[{\"added\": {}}]" } @@ -19145,7 +19158,7 @@ "user" ], "object_id": "12", - "object_repr": "business3", + "object_repr": "user9", "action_flag": 2, "change_message": "[{\"changed\": {\"fields\": [\"First name\", \"Last name\", \"Email address\", \"Groups\"]}}]" } @@ -19163,7 +19176,7 @@ "user" ], "object_id": "13", - "object_repr": "business4", + "object_repr": "user10", "action_flag": 1, "change_message": "[{\"added\": {}}]" } @@ -19181,7 +19194,7 @@ "user" ], "object_id": "13", - "object_repr": "business4", + "object_repr": "user10", "action_flag": 2, "change_message": "[{\"changed\": {\"fields\": [\"First name\", \"Last name\", \"Email address\", \"Groups\"]}}]" } diff --git a/tests/python/shared/assets/invitations.json b/tests/python/shared/assets/invitations.json index 6b0f24528202..9a58f4bbee09 100644 --- a/tests/python/shared/assets/invitations.json +++ b/tests/python/shared/assets/invitations.json @@ -21,11 +21,11 @@ }, "role": "maintainer", "user": { - "first_name": "Business", + "first_name": "User", "id": 11, - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" } }, { @@ -113,11 +113,11 @@ "slug": "org2" }, "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "role": "maintainer", "user": { @@ -138,11 +138,11 @@ "slug": "org2" }, "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "role": "supervisor", "user": { @@ -163,11 +163,11 @@ "slug": "org2" 
}, "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "role": "supervisor", "user": { @@ -188,11 +188,11 @@ "slug": "org2" }, "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "role": "worker", "user": { @@ -213,11 +213,11 @@ "slug": "org2" }, "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "role": "worker", "user": { @@ -238,19 +238,19 @@ "slug": "org2" }, "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "role": "maintainer", "user": { - "first_name": "Business", + "first_name": "User", "id": 11, - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" } }, { @@ -271,11 +271,11 @@ }, "role": "maintainer", "user": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" } }, { diff --git a/tests/python/shared/assets/issues.json b/tests/python/shared/assets/issues.json index 9aff3cf4b632..e719b30ac11d 100644 --- a/tests/python/shared/assets/issues.json +++ b/tests/python/shared/assets/issues.json @@ -72,11 +72,11 @@ "id": 3, "job": 16, "owner": { - "first_name": "Business", + "first_name": "User", "id": 11, - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" }, "position": [ 108.1845703125, @@ -98,11 +98,11 @@ "id": 2, "job": 9, "owner": { - "first_name": "Business", + "first_name": "User", "id": 11, - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" }, "position": [ 98.48046875, diff --git a/tests/python/shared/assets/memberships.json b/tests/python/shared/assets/memberships.json index 9ae6bcc8d950..3c0be8035d2d 100644 --- a/tests/python/shared/assets/memberships.json +++ b/tests/python/shared/assets/memberships.json @@ -11,11 +11,11 @@ "organization": 1, "role": "maintainer", "user": { - "first_name": "Business", + "first_name": "User", "id": 11, - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" } }, { @@ -146,11 +146,11 @@ "organization": 2, "role": "maintainer", "user": { - "first_name": "Business", + "first_name": "User", "id": 11, - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" } }, { @@ -161,11 +161,11 @@ "organization": 2, "role": "owner", "user": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" } }, { @@ -176,11 +176,11 @@ "organization": 1, "role": "maintainer", 
"user": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" } }, { diff --git a/tests/python/shared/assets/organizations.json b/tests/python/shared/assets/organizations.json index ad26620a27e0..8106c5b8b6a7 100644 --- a/tests/python/shared/assets/organizations.json +++ b/tests/python/shared/assets/organizations.json @@ -12,11 +12,11 @@ "id": 2, "name": "Organization #2", "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "slug": "org2", "updated_date": "2021-12-14T19:51:38.667000Z" diff --git a/tests/python/shared/assets/projects.json b/tests/python/shared/assets/projects.json index f7c0c25b464e..19d345ee4e30 100644 --- a/tests/python/shared/assets/projects.json +++ b/tests/python/shared/assets/projects.json @@ -5,11 +5,11 @@ "results": [ { "assignee": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "assignee_updated_date": "2024-09-23T08:09:45.461000Z", "bug_tracker": "", @@ -383,11 +383,11 @@ }, { "assignee": { - "first_name": "Business", + "first_name": "User", "id": 13, - "last_name": "Fourth", + "last_name": "Tenth", "url": "http://localhost:8080/api/users/13", - "username": "business4" + "username": "user10" }, "assignee_updated_date": null, "bug_tracker": "", @@ -553,11 +553,11 @@ "name": "project2", "organization": 2, "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "source_storage": { "cloud_storage_id": 3, @@ -600,11 +600,11 @@ "name": "project1", "organization": null, "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "source_storage": null, "status": "annotation", diff --git a/tests/python/shared/assets/quality_settings.json b/tests/python/shared/assets/quality_settings.json index 54e0c18c63a3..7ddc589bc7bf 100644 --- a/tests/python/shared/assets/quality_settings.json +++ b/tests/python/shared/assets/quality_settings.json @@ -14,10 +14,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 2 @@ -33,10 +35,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 5 @@ -52,10 +56,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, 
"panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 6 @@ -71,10 +77,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 7 @@ -90,10 +98,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 8 @@ -109,10 +119,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 9 @@ -128,10 +140,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 11 @@ -147,10 +161,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 12 @@ -166,10 +182,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 13 @@ -185,10 +203,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 14 @@ -204,10 +224,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 15 @@ -223,10 +245,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 17 @@ -242,10 +266,12 @@ "line_orientation_threshold": 0.1, 
"line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 18 @@ -261,10 +287,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 19 @@ -280,10 +308,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 20 @@ -299,10 +329,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 21 @@ -318,10 +350,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 22 @@ -337,10 +371,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 23 @@ -356,10 +392,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 24 @@ -375,10 +413,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 25 @@ -394,10 +434,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 26 @@ -413,10 +455,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + 
"point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 27 @@ -432,10 +476,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 28 @@ -451,10 +497,12 @@ "line_orientation_threshold": 0.1, "line_thickness": 0.01, "low_overlap_threshold": 0.8, + "match_empty_frames": false, "max_validations_per_job": 0, "object_visibility_threshold": 0.05, "oks_sigma": 0.09, "panoptic_comparison": true, + "point_size_base": "group_bbox_size", "target_metric": "accuracy", "target_metric_threshold": 0.7, "task_id": 29 diff --git a/tests/python/shared/assets/tasks.json b/tests/python/shared/assets/tasks.json index 5a28176ef5ec..cf2d63da785c 100644 --- a/tests/python/shared/assets/tasks.json +++ b/tests/python/shared/assets/tasks.json @@ -159,11 +159,11 @@ }, { "assignee": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "assignee_updated_date": "2024-09-23T10:51:45.525000Z", "bug_tracker": "", @@ -890,11 +890,11 @@ "organization": 2, "overlap": 0, "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "project_id": 2, "segment_size": 11, @@ -948,11 +948,11 @@ "organization": null, "overlap": 0, "owner": { - "first_name": "Business", + "first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "project_id": 1, "segment_size": 5, @@ -1048,11 +1048,11 @@ "organization": 2, "overlap": 0, "owner": { - "first_name": "Business", + "first_name": "User", "id": 11, - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" }, "project_id": null, "segment_size": 11, diff --git a/tests/python/shared/assets/users.json b/tests/python/shared/assets/users.json index 7a3172c70a43..9c4dce1e4fdf 100644 --- a/tests/python/shared/assets/users.json +++ b/tests/python/shared/assets/users.json @@ -10,6 +10,7 @@ "groups": [ "user" ], + "has_analytics_access": false, "id": 21, "is_active": true, "is_staff": false, @@ -26,6 +27,7 @@ "groups": [ "user" ], + "has_analytics_access": false, "id": 20, "is_active": true, "is_staff": false, @@ -42,6 +44,7 @@ "groups": [ "user" ], + "has_analytics_access": false, "id": 19, "is_active": true, "is_staff": false, @@ -58,6 +61,7 @@ "groups": [ "admin" ], + "has_analytics_access": true, "id": 18, "is_active": true, "is_staff": true, @@ -72,6 +76,7 @@ "email": "dummy4@cvat.org", "first_name": "Dummy", "groups": [], + "has_analytics_access": false, "id": 17, "is_active": true, "is_staff": false, @@ -86,6 +91,7 @@ "email": "dummy3@cvat.org", "first_name": "Dummy", "groups": [], + "has_analytics_access": false, "id": 16, "is_active": true, "is_staff": false, @@ -100,6 +106,7 @@ "email": "dummy2@cvat.org", "first_name": "Dummy", "groups": [], + "has_analytics_access": false, "id": 15, "is_active": true, "is_staff": false, @@ -114,6 +121,7 
@@ "email": "dummy1@cvat.org", "first_name": "Dummy", "groups": [], + "has_analytics_access": false, "id": 14, "is_active": true, "is_staff": false, @@ -125,67 +133,71 @@ }, { "date_joined": "2021-12-14T18:35:15Z", - "email": "business4@cvat.org", - "first_name": "Business", + "email": "user10@cvat.org", + "first_name": "User", "groups": [ - "business" + "user" ], + "has_analytics_access": false, "id": 13, "is_active": true, "is_staff": false, "is_superuser": false, "last_login": null, - "last_name": "Fourth", + "last_name": "Tenth", "url": "http://localhost:8080/api/users/13", - "username": "business4" + "username": "user10" }, { "date_joined": "2021-12-14T18:34:34Z", - "email": "business3@cvat.org", - "first_name": "Business", + "email": "user9@cvat.org", + "first_name": "User", "groups": [ - "business" + "user" ], + "has_analytics_access": false, "id": 12, "is_active": true, "is_staff": false, "is_superuser": false, "last_login": null, - "last_name": "Third", + "last_name": "Nineth", "url": "http://localhost:8080/api/users/12", - "username": "business3" + "username": "user9" }, { "date_joined": "2021-12-14T18:34:01Z", - "email": "business2@cvat.org", - "first_name": "Business", + "email": "user8@cvat.org", + "first_name": "User", "groups": [ - "business" + "user" ], + "has_analytics_access": false, "id": 11, "is_active": true, "is_staff": false, "is_superuser": false, "last_login": "2022-03-17T07:22:55.930000Z", - "last_name": "Second", + "last_name": "Eighth", "url": "http://localhost:8080/api/users/11", - "username": "business2" + "username": "user8" }, { "date_joined": "2021-12-14T18:33:06Z", - "email": "business1@cvat.org", - "first_name": "Business", + "email": "user7@cvat.org", + "first_name": "User", "groups": [ - "business" + "user" ], + "has_analytics_access": false, "id": 10, "is_active": true, "is_staff": false, "is_superuser": false, "last_login": "2022-09-28T12:17:51.373000Z", - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, { "date_joined": "2021-12-14T18:32:01Z", @@ -194,6 +206,7 @@ "groups": [ "worker" ], + "has_analytics_access": true, "id": 9, "is_active": true, "is_staff": false, @@ -210,6 +223,7 @@ "groups": [ "worker" ], + "has_analytics_access": false, "id": 8, "is_active": true, "is_staff": false, @@ -226,6 +240,7 @@ "groups": [ "worker" ], + "has_analytics_access": false, "id": 7, "is_active": true, "is_staff": false, @@ -242,6 +257,7 @@ "groups": [ "worker" ], + "has_analytics_access": false, "id": 6, "is_active": true, "is_staff": false, @@ -258,6 +274,7 @@ "groups": [ "user" ], + "has_analytics_access": true, "id": 5, "is_active": true, "is_staff": false, @@ -274,6 +291,7 @@ "groups": [ "user" ], + "has_analytics_access": false, "id": 4, "is_active": true, "is_staff": false, @@ -290,6 +308,7 @@ "groups": [ "user" ], + "has_analytics_access": false, "id": 3, "is_active": true, "is_staff": false, @@ -306,6 +325,7 @@ "groups": [ "user" ], + "has_analytics_access": false, "id": 2, "is_active": true, "is_staff": false, @@ -322,6 +342,7 @@ "groups": [ "admin" ], + "has_analytics_access": true, "id": 1, "is_active": true, "is_staff": true, diff --git a/tests/python/shared/assets/webhooks.json b/tests/python/shared/assets/webhooks.json index da5b0f6837d9..b6a90828ee3c 100644 --- a/tests/python/shared/assets/webhooks.json +++ b/tests/python/shared/assets/webhooks.json @@ -95,11 +95,11 @@ "is_active": true, "organization": null, "owner": { - "first_name": "Business", + 
"first_name": "User", "id": 10, - "last_name": "First", + "last_name": "Seventh", "url": "http://localhost:8080/api/users/10", - "username": "business1" + "username": "user7" }, "project_id": 1, "target_url": "http://example.com/", diff --git a/tests/python/shared/fixtures/data.py b/tests/python/shared/fixtures/data.py index 64ac7d09cbbc..0f6fb6939544 100644 --- a/tests/python/shared/fixtures/data.py +++ b/tests/python/shared/fixtures/data.py @@ -5,8 +5,8 @@ import json import operator from collections import defaultdict +from collections.abc import Iterable from copy import deepcopy -from typing import Iterable import pytest @@ -367,14 +367,28 @@ def find(**kwargs): @pytest.fixture(scope="session") def test_db(users, users_by_name, memberships): data = [] - fields = ["username", "id", "privilege", "role", "org", "membership_id", "is_superuser"] + fields = [ + "username", + "id", + "privilege", + "role", + "org", + "membership_id", + "is_superuser", + "has_analytics_access", + ] def add_row(**kwargs): data.append({field: kwargs.get(field) for field in fields}) for user in users: for group in user["groups"]: - add_row(username=user["username"], id=user["id"], privilege=group) + add_row( + username=user["username"], + id=user["id"], + privilege=group, + has_analytics_access=user["has_analytics_access"], + ) for membership in memberships: username = membership["user"]["username"] @@ -386,6 +400,7 @@ def add_row(**kwargs): id=membership["user"]["id"], org=membership["organization"], membership_id=membership["id"], + has_analytics_access=users_by_name[username]["has_analytics_access"], ) return data diff --git a/tests/python/shared/fixtures/init.py b/tests/python/shared/fixtures/init.py index aa1192a0acf5..1f5d57ffc5d7 100644 --- a/tests/python/shared/fixtures/init.py +++ b/tests/python/shared/fixtures/init.py @@ -10,7 +10,7 @@ from pathlib import Path from subprocess import PIPE, CalledProcessError, run from time import sleep -from typing import List, Union +from typing import Union import pytest import requests @@ -158,13 +158,13 @@ def docker_exec(container, command, capture_output=True): return _run(f"docker exec -u root {PREFIX}_{container}_1 {command}", capture_output) -def docker_exec_cvat(command: Union[List[str], str]): +def docker_exec_cvat(command: Union[list[str], str]): base = f"docker exec {PREFIX}_cvat_server_1" _command = f"{base} {command}" if isinstance(command, str) else base.split() + command return _run(_command) -def kube_exec_cvat(command: Union[List[str], str]): +def kube_exec_cvat(command: Union[list[str], str]): pod_name = _kube_get_server_pod_name() base = f"kubectl exec {pod_name} --" _command = f"{base} {command}" if isinstance(command, str) else base.split() + command @@ -300,9 +300,10 @@ def dump_db(): def create_compose_files(container_name_files): for filename in container_name_files: - with open(filename.with_name(filename.name.replace(".tests", "")), "r") as dcf, open( - filename, "w" - ) as ndcf: + with ( + open(filename.with_name(filename.name.replace(".tests", "")), "r") as dcf, + open(filename, "w") as ndcf, + ): dc_config = yaml.safe_load(dcf) for service_name, service_config in dc_config["services"].items(): diff --git a/tests/python/shared/utils/config.py b/tests/python/shared/utils/config.py index f313334c797d..e65ac0b904a5 100644 --- a/tests/python/shared/utils/config.py +++ b/tests/python/shared/utils/config.py @@ -2,9 +2,9 @@ # # SPDX-License-Identifier: MIT +from collections.abc import Generator from contextlib import contextmanager from pathlib 
import Path -from typing import Generator import requests from cvat_sdk.api_client import ApiClient, Configuration diff --git a/tests/python/shared/utils/helpers.py b/tests/python/shared/utils/helpers.py index 14015f4b2ad3..4855796a0a86 100644 --- a/tests/python/shared/utils/helpers.py +++ b/tests/python/shared/utils/helpers.py @@ -3,9 +3,10 @@ # SPDX-License-Identifier: MIT import subprocess +from collections.abc import Generator from contextlib import closing from io import BytesIO -from typing import Generator, List, Optional, Tuple +from typing import Optional import av import av.video.reformatter @@ -27,10 +28,10 @@ def generate_image_file(filename="image.png", size=(100, 50), color=(0, 0, 0)): def generate_image_files( count: int, *, - prefixes: Optional[List[str]] = None, - filenames: Optional[List[str]] = None, - sizes: Optional[List[Tuple[int, int]]] = None, -) -> List[BytesIO]: + prefixes: Optional[list[str]] = None, + filenames: Optional[list[str]] = None, + sizes: Optional[list[tuple[int, int]]] = None, +) -> list[BytesIO]: assert not (prefixes and filenames), "prefixes cannot be used together with filenames" assert not prefixes or len(prefixes) == count assert not filenames or len(filenames) == count diff --git a/tests/python/shared/utils/resource_import_export.py b/tests/python/shared/utils/resource_import_export.py index 37983dbd1478..c8b4fd7ca93b 100644 --- a/tests/python/shared/utils/resource_import_export.py +++ b/tests/python/shared/utils/resource_import_export.py @@ -1,10 +1,10 @@ import functools import json -from abc import ABC, abstractstaticmethod +from abc import ABC, abstractmethod from contextlib import ExitStack from http import HTTPStatus from time import sleep -from typing import Any, Dict, Optional, TypeVar +from typing import Any, Optional, TypeVar import pytest @@ -17,7 +17,7 @@ IMPORT_FORMAT = "CVAT 1.1" -def _make_custom_resource_params(resource: str, obj: str, cloud_storage_id: int) -> Dict[str, Any]: +def _make_custom_resource_params(resource: str, obj: str, cloud_storage_id: int) -> dict[str, Any]: return { "filename": FILENAME_TEMPLATE.format(obj, resource), "location": "cloud_storage", @@ -25,7 +25,7 @@ def _make_custom_resource_params(resource: str, obj: str, cloud_storage_id: int) } -def _make_default_resource_params(resource: str, obj: str) -> Dict[str, Any]: +def _make_default_resource_params(resource: str, obj: str) -> dict[str, Any]: return { "filename": FILENAME_TEMPLATE.format(obj, resource), } @@ -33,7 +33,7 @@ def _make_default_resource_params(resource: str, obj: str) -> Dict[str, Any]: def _make_export_resource_params( resource: str, is_default: bool = True, **kwargs -) -> Dict[str, Any]: +) -> dict[str, Any]: func = _make_default_resource_params if is_default else _make_custom_resource_params params = func(resource, **kwargs) if resource != "backup": @@ -43,7 +43,7 @@ def _make_export_resource_params( def _make_import_resource_params( resource: str, is_default: bool = True, **kwargs -) -> Dict[str, Any]: +) -> dict[str, Any]: func = _make_default_resource_params if is_default else _make_custom_resource_params params = func(resource, **kwargs) if resource != "backup": @@ -52,7 +52,8 @@ def _make_import_resource_params( class _CloudStorageResourceTest(ABC): - @abstractstaticmethod + @staticmethod + @abstractmethod def _make_client(): pass @@ -64,7 +65,7 @@ def setup(self, admin_user: str): with self.exit_stack: yield - def _ensure_file_created(self, func: T, storage: Dict[str, Any]) -> T: + def _ensure_file_created(self, func: T, storage: 
dict[str, Any]) -> T: @functools.wraps(func) def wrapper(*args, **kwargs): filename = kwargs["filename"] @@ -219,7 +220,7 @@ def _import_dataset_from_cloud_storage( response = get_method(user, url, action="import_status", rq_id=rq_id) status = response.status_code - def _import_resource(self, cloud_storage: Dict[str, Any], resource_type: str, *args, **kwargs): + def _import_resource(self, cloud_storage: dict[str, Any], resource_type: str, *args, **kwargs): methods = { "annotations": self._import_annotations_from_cloud_storage, "dataset": self._import_dataset_from_cloud_storage, @@ -234,7 +235,7 @@ def _import_resource(self, cloud_storage: Dict[str, Any], resource_type: str, *a return methods[resource_type](*args, **kwargs) - def _export_resource(self, cloud_storage: Dict[str, Any], *args, **kwargs): + def _export_resource(self, cloud_storage: dict[str, Any], *args, **kwargs): org_id = cloud_storage["organization"] if org_id: kwargs.setdefault("org_id", org_id) diff --git a/utils/__init__.py b/utils/__init__.py index d0af4b967942..6370694b5ea2 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,4 +1,3 @@ # Copyright (C) 2022 Intel Corporation # # SPDX-License-Identifier: MIT - diff --git a/utils/dataset_manifest/core.py b/utils/dataset_manifest/core.py index 6a7c9d92f0d6..449e70d64098 100644 --- a/utils/dataset_manifest/core.py +++ b/utils/dataset_manifest/core.py @@ -9,7 +9,8 @@ import json import os -from abc import ABC, abstractmethod, abstractproperty, abstractstaticmethod +from abc import ABC, abstractmethod +from collections.abc import Iterator from contextlib import closing from itertools import islice from PIL import Image @@ -20,7 +21,7 @@ from .utils import SortingMethod, md5_hash, rotate_image, sort from .types import NamedBytesIO -from typing import Any, Dict, List, Union, Optional, Iterator, Tuple, Callable +from typing import Any, Union, Optional, Callable class VideoStreamReader: @@ -78,7 +79,7 @@ def validate_key_frame(self, container, video_stream, key_frame): return False return True - def __iter__(self) -> Iterator[Union[int, Tuple[int, int, str]]]: + def __iter__(self) -> Iterator[Union[int, tuple[int, int, str]]]: """ Iterate over video frames and yield key frames or indexes. @@ -143,12 +144,12 @@ def __iter__(self) -> Iterator[Union[int, Tuple[int, int, str]]]: class DatasetImagesReader: def __init__(self, - sources: Union[List[str], Iterator[NamedBytesIO]], + sources: Union[list[str], Iterator[NamedBytesIO]], *, start: int = 0, step: int = 1, stop: Optional[int] = None, - meta: Optional[Dict[str, List[str]]] = None, + meta: Optional[dict[str, list[str]]] = None, sorting_method: SortingMethod = SortingMethod.PREDEFINED, use_image_hash: bool = False, **kwargs @@ -196,7 +197,7 @@ def step(self): def step(self, value): self._step = int(value) - def _get_img_properties(self, image: Union[str, NamedBytesIO]) -> Dict[str, Any]: + def _get_img_properties(self, image: Union[str, NamedBytesIO]) -> dict[str, Any]: img = Image.open(image, mode='r') if self._data_dir: img_name = os.path.relpath(image, self._data_dir) @@ -469,7 +470,8 @@ def __getitem__(self, item): def index(self): return self._index - @abstractproperty + @property + @abstractmethod def data(self): ... 
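The abc changes above are the standard modernization: abstractproperty and abstractstaticmethod have been deprecated since Python 3.3 in favor of stacking @property or @staticmethod over @abstractmethod, with @abstractmethod as the innermost decorator. A minimal sketch of the resulting pattern; the base class name here is illustrative, while the member names are the ones touched in core.py:

```python
from abc import ABC, abstractmethod


class ManifestBase(ABC):  # illustrative base class name
    @property
    @abstractmethod
    def validators(self):
        """Subclasses must override this as a property."""

    @staticmethod
    @abstractmethod
    def _validate_first_item(_dict):
        """Subclasses must override this as a static method."""


# Calling ManifestBase(), or instantiating a subclass that does not
# override both members, raises TypeError -- the same guarantee the
# deprecated aliases provided.
```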
@@ -665,7 +667,7 @@ def emulate_hierarchical_structure(
        prefix: str = "",
        default_prefix: Optional[str] = None,
        start_index: Optional[int] = None,
-    ) -> Dict:
+    ) -> dict:
        if default_prefix and prefix and not (default_prefix.startswith(prefix) or prefix.startswith(default_prefix)):
            return {
@@ -727,12 +729,12 @@ def emulate_hierarchical_structure(
            'next': next_start_index,
        }

-    def reorder(self, reordered_images: List[str]) -> None:
+    def reorder(self, reordered_images: list[str]) -> None:
        """
        The method takes a list of image names and reorders its content based on this new list.
        Due to the implementation of Honeypots, the reordered list of image names may contain duplicates.
        """
-        unique_images: Dict[str, Any] = {}
+        unique_images: dict[str, Any] = {}
        for _, image_details in self:
            if image_details.full_name not in unique_images:
                unique_images[image_details.full_name] = image_details
@@ -766,11 +768,13 @@ def _validate_type(self, _dict):
        if not _dict['type'] == self.TYPE:
            raise InvalidManifestError('Incorrect type field')

-    @abstractproperty
+    @property
+    @abstractmethod
    def validators(self):
        pass

-    @abstractstaticmethod
+    @staticmethod
+    @abstractmethod
    def _validate_first_item(_dict):
        pass
diff --git a/utils/dataset_manifest/requirements.txt b/utils/dataset_manifest/requirements.txt
index c103c3e79add..6d3ed66aecb1 100644
--- a/utils/dataset_manifest/requirements.txt
+++ b/utils/dataset_manifest/requirements.txt
@@ -13,7 +13,7 @@ numpy==1.22.4
    # via opencv-python-headless
opencv-python-headless==4.10.0.84
    # via -r utils/dataset_manifest/requirements.in
-pillow==10.4.0
+pillow==11.0.0
    # via -r utils/dataset_manifest/requirements.in
-tqdm==4.66.5
+tqdm==4.67.1
    # via -r utils/dataset_manifest/requirements.in
diff --git a/utils/dicom_converter/script.py b/utils/dicom_converter/script.py
index 23a1e7526e3a..3fe7ef0be6dd 100644
--- a/utils/dicom_converter/script.py
+++ b/utils/dicom_converter/script.py
@@ -16,10 +16,20 @@

# Script configuration
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
-parser = argparse.ArgumentParser(description='The script is used to convert some kinds of DICOM (.dcm) files to regular image files (.png)')
-parser.add_argument('input', type=str, help='A root directory with medical data files in DICOM format. The script finds all these files based on their extension')
-parser.add_argument('output', type=str, help='Where to save converted files. The script repeats internal directories structure of the input root directory')
+logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(message)s")
+parser = argparse.ArgumentParser(
+    description="The script is used to convert some kinds of DICOM (.dcm) files to regular image files (.png)"
+)
+parser.add_argument(
+    "input",
+    type=str,
+    help="A root directory with medical data files in DICOM format. The script finds all these files based on their extension",
+)
+parser.add_argument(
+    "output",
+    type=str,
+    help="Where to save converted files. The script repeats internal directories structure of the input root directory",
+)
args = parser.parse_args()

@@ -32,11 +42,11 @@ def __init__(self, filename):
        self._max_value = ds.pixel_array.max()
        self._depth = ds.BitsStored

-        logging.debug('File: {}'.format(filename))
-        logging.debug('Photometric interpretation: {}'.format(self._photometric_interpretation))
-        logging.debug('Min value: {}'.format(self._min_value))
-        logging.debug('Max value: {}'.format(self._max_value))
-        logging.debug('Depth: {}'.format(self._depth))
+        logging.debug("File: {}".format(filename))
+        logging.debug("Photometric interpretation: {}".format(self._photometric_interpretation))
+        logging.debug("Min value: {}".format(self._min_value))
+        logging.debug("Max value: {}".format(self._max_value))
+        logging.debug("Depth: {}".format(self._depth))

        try:
            self._length = ds["NumberOfFrames"].value
@@ -53,38 +63,40 @@ def __iter__(self):
        for pixel_array in self._pixel_array:
            # Normalization to an output range 0..255, 0..65535
            pixel_array = pixel_array - self._min_value
-            pixel_array = pixel_array.astype(int) * (2 ** self._depth - 1)
+            pixel_array = pixel_array.astype(int) * (2**self._depth - 1)
            pixel_array = pixel_array // (self._max_value - self._min_value)

            # In some cases we need to convert colors additionally
-            if 'YBR' in self._photometric_interpretation:
-                pixel_array = convert_color_space(pixel_array, self._photometric_interpretation, 'RGB')
+            if "YBR" in self._photometric_interpretation:
+                pixel_array = convert_color_space(
+                    pixel_array, self._photometric_interpretation, "RGB"
+                )

            if self._depth == 8:
                image = Image.fromarray(pixel_array.astype(np.uint8))
            elif self._depth == 16:
                image = Image.fromarray(pixel_array.astype(np.uint16))
            else:
-                raise Exception('Not supported depth {}'.format(self._depth))
+                raise Exception("Not supported depth {}".format(self._depth))

            yield image


def main(root_dir, output_root_dir):
-    dicom_files = glob(os.path.join(root_dir, '**', '*.dcm'), recursive = True)
+    dicom_files = glob(os.path.join(root_dir, "**", "*.dcm"), recursive=True)
    if not len(dicom_files):
-        logging.info('DICOM files are not found under the specified path')
+        logging.info("DICOM files are not found under the specified path")
    else:
-        logging.info('Number of found DICOM files: ' + str(len(dicom_files)))
+        logging.info("Number of found DICOM files: " + str(len(dicom_files)))

        pbar = tqdm(dicom_files)
        for input_filename in pbar:
-            pbar.set_description('Conversion: ' + input_filename)
+            pbar.set_description("Conversion: " + input_filename)
            input_basename = os.path.basename(input_filename)

            output_subpath = os.path.relpath(os.path.dirname(input_filename), root_dir)
            output_path = os.path.join(output_root_dir, output_subpath)
-            output_basename = '{}.png'.format(os.path.splitext(input_basename)[0])
+            output_basename = "{}.png".format(os.path.splitext(input_basename)[0])
            output_filename = os.path.join(output_path, output_basename)

            if not os.path.exists(output_path):
@@ -98,16 +110,19 @@ def main(root_dir, output_root_dir):
                    image.save(output_filename)
                else:
                    filename_index = str(i).zfill(len(str(length)))
-                    list_output_filename = '{}_{}.png'.format(os.path.splitext(output_filename)[0], filename_index)
+                    list_output_filename = "{}_{}.png".format(
+                        os.path.splitext(output_filename)[0], filename_index
+                    )
                    image.save(list_output_filename)
            except Exception as ex:
-                logging.error('Error while processing ' + input_filename)
+                logging.error("Error while processing " + input_filename)
                logging.error(ex)

-if __name__ == '__main__':
+
+if __name__ == "__main__":
    input_root_path = os.path.abspath(args.input.rstrip(os.sep))
    output_root_path = os.path.abspath(args.output.rstrip(os.sep))

-    logging.info('From: {}'.format(input_root_path))
-    logging.info('To: {}'.format(output_root_path))
+    logging.info("From: {}".format(input_root_path))
+    logging.info("To: {}".format(output_root_path))

    main(input_root_path, output_root_path)
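For context on the reformatted normalization hunk above: the converter shifts each frame so its minimum stored value maps to 0, stretches it by (2**depth - 1), then integer-divides by the value span, filling the full 0..255 or 0..65535 output range. A standalone sketch of the same arithmetic — the function name and sample values are illustrative, not from the script:

import numpy as np


def normalize_frame(pixels: np.ndarray, min_value: int, max_value: int, depth: int) -> np.ndarray:
    """Stretch raw DICOM sample values to the full 0..2**depth - 1 range."""
    shifted = pixels.astype(int) - min_value  # smallest stored value -> 0
    scaled = shifted * (2**depth - 1) // (max_value - min_value)
    return scaled.astype(np.uint8 if depth == 8 else np.uint16)


# A frame stored with values 100..4000, rendered as 8-bit image data:
frame = np.array([[100, 2050, 4000]])
print(normalize_frame(frame, 100, 4000, 8))  # [[  0 127 255]]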
diff --git a/wait-for-it.sh b/wait-for-it.sh
deleted file mode 100755
index 12f10ee7dcd7..000000000000
--- a/wait-for-it.sh
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env bash
-# Use this script to test if a given TCP host/port are available
-# https://github.com/vishnubob/wait-for-it
-
-cmdname=$(basename $0)
-
-echoerr() { if [[ $QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }
-
-usage()
-{
-    cat << USAGE >&2
-Usage:
-    $cmdname host:port [-s] [-t timeout] [-- command args]
-    -h HOST | --host=HOST       Host or IP under test
-    -p PORT | --port=PORT       TCP port under test
-                                Alternatively, you specify the host and port as host:port
-    -s | --strict               Only execute subcommand if the test succeeds
-    -q | --quiet                Don't output any status messages
-    -t TIMEOUT | --timeout=TIMEOUT
-                                Timeout in seconds, zero for no timeout
-    -- COMMAND ARGS             Execute command with args after the test finishes
-USAGE
-    exit 1
-}
-
-wait_for()
-{
-    if [[ $TIMEOUT -gt 0 ]]; then
-        echoerr "$cmdname: waiting $TIMEOUT seconds for $HOST:$PORT"
-    else
-        echoerr "$cmdname: waiting for $HOST:$PORT without a timeout"
-    fi
-    start_ts=$(date +%s)
-    while :
-    do
-        if [[ $ISBUSY -eq 1 ]]; then
-            nc -z $HOST $PORT
-            result=$?
-        else
-            (echo > /dev/tcp/$HOST/$PORT) >/dev/null 2>&1
-            result=$?
-        fi
-        if [[ $result -eq 0 ]]; then
-            end_ts=$(date +%s)
-            echoerr "$cmdname: $HOST:$PORT is available after $((end_ts - start_ts)) seconds"
-            break
-        fi
-        sleep 1
-    done
-    return $result
-}
-
-wait_for_wrapper()
-{
-    # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
-    if [[ $QUIET -eq 1 ]]; then
-        timeout $BUSYTIMEFLAG $TIMEOUT $0 --quiet --child --host=$HOST --port=$PORT --timeout=$TIMEOUT &
-    else
-        timeout $BUSYTIMEFLAG $TIMEOUT $0 --child --host=$HOST --port=$PORT --timeout=$TIMEOUT &
-    fi
-    PID=$!
-    trap "kill -INT -$PID" INT
-    wait $PID
-    RESULT=$?
-    if [[ $RESULT -ne 0 ]]; then
-        echoerr "$cmdname: timeout occurred after waiting $TIMEOUT seconds for $HOST:$PORT"
-    fi
-    return $RESULT
-}
-
-# process arguments
-while [[ $# -gt 0 ]]
-do
-    case "$1" in
-        *:* )
-        hostport=(${1//:/ })
-        HOST=${hostport[0]}
-        PORT=${hostport[1]}
-        shift 1
-        ;;
-        --child)
-        CHILD=1
-        shift 1
-        ;;
-        -q | --quiet)
-        QUIET=1
-        shift 1
-        ;;
-        -s | --strict)
-        STRICT=1
-        shift 1
-        ;;
-        -h)
-        HOST="$2"
-        if [[ $HOST == "" ]]; then break; fi
-        shift 2
-        ;;
-        --host=*)
-        HOST="${1#*=}"
-        shift 1
-        ;;
-        -p)
-        PORT="$2"
-        if [[ $PORT == "" ]]; then break; fi
-        shift 2
-        ;;
-        --port=*)
-        PORT="${1#*=}"
-        shift 1
-        ;;
-        -t)
-        TIMEOUT="$2"
-        if [[ $TIMEOUT == "" ]]; then break; fi
-        shift 2
-        ;;
-        --timeout=*)
-        TIMEOUT="${1#*=}"
-        shift 1
-        ;;
-        --)
-        shift
-        CLI=("$@")
-        break
-        ;;
-        --help)
-        usage
-        ;;
-        *)
-        echoerr "Unknown argument: $1"
-        usage
-        ;;
-    esac
-done
-
-if [[ "$HOST" == "" || "$PORT" == "" ]]; then
-    echoerr "Error: you need to provide a host and port to test."
-    usage
-fi
-
-TIMEOUT=${TIMEOUT:-15}
-STRICT=${STRICT:-0}
-CHILD=${CHILD:-0}
-QUIET=${QUIET:-0}
-
-# check to see if timeout is from busybox?
-TIMEOUT_PATH=$(realpath $(which timeout))
-if [[ $TIMEOUT_PATH =~ "busybox" ]]; then
-    ISBUSY=1
-    BUSYTIMEFLAG="-t"
-else
-    ISBUSY=0
-    BUSYTIMEFLAG=""
-fi
-
-if [[ $CHILD -gt 0 ]]; then
-    wait_for
-    RESULT=$?
-    exit $RESULT
-else
-    if [[ $TIMEOUT -gt 0 ]]; then
-        wait_for_wrapper
-        RESULT=$?
-    else
-        wait_for
-        RESULT=$?
-    fi
-fi
-
-if [[ $CLI != "" ]]; then
-    if [[ $RESULT -ne 0 && $STRICT -eq 1 ]]; then
-        echoerr "$cmdname: strict mode, refusing to execute subprocess"
-        exit $RESULT
-    fi
-    exec "${CLI[@]}"
-else
-    exit $RESULT
-fi
diff --git a/wait_for_deps.sh b/wait_for_deps.sh
index c78950cf96c4..6cf96886fd69 100755
--- a/wait_for_deps.sh
+++ b/wait_for_deps.sh
@@ -11,8 +11,8 @@
# but it's too resource-intensive to execute for every worker we might be running
# in a container. Instead, it's in backend_entrypoint.sh.

-~/wait-for-it.sh "${CVAT_POSTGRES_HOST}:${CVAT_POSTGRES_PORT:-5432}" -t 0
-~/wait-for-it.sh "${CVAT_REDIS_INMEM_HOST}:${CVAT_REDIS_INMEM_PORT}" -t 0
-~/wait-for-it.sh "${CVAT_REDIS_ONDISK_HOST}:${CVAT_REDIS_ONDISK_PORT}" -t 0
+wait-for-it "${CVAT_POSTGRES_HOST}:${CVAT_POSTGRES_PORT:-5432}" -t 0
+wait-for-it "${CVAT_REDIS_INMEM_HOST}:${CVAT_REDIS_INMEM_PORT}" -t 0
+wait-for-it "${CVAT_REDIS_ONDISK_HOST}:${CVAT_REDIS_ONDISK_PORT}" -t 0

exec "$@"
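With the vendored wait-for-it.sh deleted, wait_for_deps.sh now calls the wait-for-it console script that the PyPI package of the same name places on PATH, so the entrypoint no longer needs a home-directory copy. Conceptually the tool just polls until a TCP connection to host:port succeeds; a rough Python sketch of that idea (illustrative only, not how the package is actually implemented):

import socket
import time


def wait_for_port(host: str, port: int, timeout: float = 0) -> None:
    """Block until host:port accepts TCP connections (timeout=0 waits forever)."""
    deadline = time.monotonic() + timeout if timeout else None
    while True:
        try:
            with socket.create_connection((host, port), timeout=1):
                return  # the dependency answered, stop waiting
        except OSError:
            if deadline is not None and time.monotonic() > deadline:
                raise TimeoutError(f"{host}:{port} did not become available")
            time.sleep(1)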
"^7.24.8" classnames "^2.2.6" rc-util "^5.31.1" @@ -1134,17 +1115,10 @@ resolved "https://registry.yarnpkg.com/@babel/regjsgen/-/regjsgen-0.8.0.tgz#f0ba69b075e1f05fb2825b7fad991e7adbb18310" integrity sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA== -"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.1", "@babel/runtime@^7.10.4", "@babel/runtime@^7.11.1", "@babel/runtime@^7.11.2", "@babel/runtime@^7.12.1", "@babel/runtime@^7.12.13", "@babel/runtime@^7.12.5", "@babel/runtime@^7.14.6", "@babel/runtime@^7.16.7", "@babel/runtime@^7.17.2", "@babel/runtime@^7.18.0", "@babel/runtime@^7.18.3", "@babel/runtime@^7.2.0", "@babel/runtime@^7.20.0", "@babel/runtime@^7.20.7", "@babel/runtime@^7.21.0", "@babel/runtime@^7.22.5", "@babel/runtime@^7.23.2", "@babel/runtime@^7.23.6", "@babel/runtime@^7.23.9", "@babel/runtime@^7.24.4", "@babel/runtime@^7.24.5", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": - version "7.24.5" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.24.5.tgz#230946857c053a36ccc66e1dd03b17dd0c4ed02c" - integrity sha512-Nms86NXrsaeU9vbBJKni6gXiEXZ4CVpYVzEjDH9Sb8vmZ3UljyA1GSOJl/6LGPO8EHLuSF9H+IxNXHPX8QHJ4g== - dependencies: - regenerator-runtime "^0.14.0" - -"@babel/runtime@^7.24.7": - version "7.24.7" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.24.7.tgz#f4f0d5530e8dbdf59b3451b9b3e594b6ba082e12" - integrity sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw== +"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.1", "@babel/runtime@^7.10.4", "@babel/runtime@^7.11.1", "@babel/runtime@^7.11.2", "@babel/runtime@^7.12.1", "@babel/runtime@^7.12.13", "@babel/runtime@^7.12.5", "@babel/runtime@^7.14.6", "@babel/runtime@^7.16.7", "@babel/runtime@^7.17.2", "@babel/runtime@^7.18.0", "@babel/runtime@^7.18.3", "@babel/runtime@^7.2.0", "@babel/runtime@^7.20.0", "@babel/runtime@^7.20.7", "@babel/runtime@^7.21.0", "@babel/runtime@^7.22.5", "@babel/runtime@^7.23.2", "@babel/runtime@^7.23.6", "@babel/runtime@^7.23.9", "@babel/runtime@^7.24.4", "@babel/runtime@^7.24.5", "@babel/runtime@^7.24.7", "@babel/runtime@^7.24.8", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": + version "7.26.0" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.26.0.tgz#8600c2f595f277c60815256418b85356a65173c1" + integrity sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw== dependencies: regenerator-runtime "^0.14.0" @@ -1481,7 +1455,7 @@ resolved "https://registry.yarnpkg.com/@csstools/utilities/-/utilities-1.0.0.tgz#42f3c213f2fb929324d465684ab9f46a0febd4bb" integrity sha512-tAgvZQe/t2mlvpNosA4+CkMiZ2azISW5WPAcdSalZlEjQvUfghHxfQcrCiK/7/CrfAWVxyM88kGFYO82heIGDg== -"@ctrl/tinycolor@^3.4.0", "@ctrl/tinycolor@^3.6.1": +"@ctrl/tinycolor@^3.6.1": version "3.6.1" resolved "https://registry.yarnpkg.com/@ctrl/tinycolor/-/tinycolor-3.6.1.tgz#b6c75a56a1947cc916ea058772d666a2c8932f31" integrity sha512-SITSV6aIXsuVNV3f3O0f2n/cgyEDWoSqtZMYiAmcsYHydcKrOz3gUxB/iXd/Qf08+IZX4KpgNbvUdMBmWz+kcA== @@ -4236,9 +4210,9 @@ create-react-class@^15.5.3: object-assign "^4.1.1" cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + version "7.0.6" + resolved 
"https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== dependencies: path-key "^3.1.0" shebang-command "^2.0.0" @@ -4402,7 +4376,7 @@ custom-error-instance@2.1.1: three "^0.156.1" "cvat-canvas@link:./cvat-canvas": - version "2.20.9" + version "2.20.10" dependencies: "@types/polylabel" "^1.0.5" polylabel "^1.1.0" @@ -4413,7 +4387,7 @@ custom-error-instance@2.1.1: svg.select.js "3.0.1" "cvat-core@link:./cvat-core": - version "15.2.0" + version "15.3.1" dependencies: axios "^1.7.4" axios-retry "^4.0.0" @@ -6294,9 +6268,9 @@ http-proxy-agent@^5.0.0: debug "4" http-proxy-middleware@^2.0.3: - version "2.0.6" - resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz#e1a4dd6979572c7ab5a4e4b55095d1f32a74963f" - integrity sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw== + version "2.0.7" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.7.tgz#915f236d92ae98ef48278a95dedf17e991936ec6" + integrity sha512-fgVY8AV7qU7z/MmXJ/rxwbrtQH4jBQ9m7kp3llF0liB7glmFeVZFBepQb32T3y8n8k2+AEYuMPCpinYW+/CuRA== dependencies: "@types/http-proxy" "^1.17.8" http-proxy "^1.18.1" @@ -8802,9 +8776,9 @@ nan@^2.17.0: integrity sha512-nO1xXxfh/RWNxfd/XPfbIfFk5vgLsAxUR9y5O0cHMJu/AW9U95JLXqthYHjEp+8gQ5p96K9jUp8nbVOxCdRbtw== nanoid@^3.3.7: - version "3.3.7" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" - integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== + version "3.3.8" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" + integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== natural-compare@^1.4.0: version "1.4.0" @@ -10438,7 +10412,7 @@ rc-util@^4.15.3: react-lifecycles-compat "^3.0.4" shallowequal "^1.1.0" -rc-util@^5.0.1, rc-util@^5.16.1, rc-util@^5.17.0, rc-util@^5.18.1, rc-util@^5.2.0, rc-util@^5.20.1, rc-util@^5.21.0, rc-util@^5.24.4, rc-util@^5.24.5, rc-util@^5.25.2, rc-util@^5.27.0, rc-util@^5.28.0, rc-util@^5.30.0, rc-util@^5.31.1, rc-util@^5.32.2, rc-util@^5.34.1, rc-util@^5.35.0, rc-util@^5.36.0, rc-util@^5.37.0, rc-util@^5.38.0, rc-util@^5.38.1, rc-util@^5.39.3, rc-util@^5.9.4: +rc-util@^5.0.1, rc-util@^5.16.1, rc-util@^5.17.0, rc-util@^5.18.1, rc-util@^5.2.0, rc-util@^5.20.1, rc-util@^5.21.0, rc-util@^5.24.4, rc-util@^5.24.5, rc-util@^5.25.2, rc-util@^5.27.0, rc-util@^5.28.0, rc-util@^5.30.0, rc-util@^5.31.1, rc-util@^5.32.2, rc-util@^5.34.1, rc-util@^5.35.0, rc-util@^5.36.0, rc-util@^5.37.0, rc-util@^5.38.0, rc-util@^5.38.1, rc-util@^5.39.3: version "5.39.3" resolved "https://registry.yarnpkg.com/rc-util/-/rc-util-5.39.3.tgz#79c7253cff7c71175b772e8242ca66459c1512eb" integrity sha512-j9wOELkLQ8gC/NkUg3qg9mHZcJf+5mYYv40JrDHqnaf8VSycji4pCf7kJ5fdTXQPDIF0vr5zpb/T2HdrMs9rWA==