diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e684c61ee0f..36b15b0d60a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -50,12 +50,12 @@ static/ @rohanmaharjan100 @wei-harness # data/*.ts @global-data-file-owners # Continuous Delivery Docs -/docs/continuous-delivery/ @krishi0408 @SushrutHarness @vishal-av @pranaykaikini @prasadsatamharness -/release-notes/continuous-delivery.md @krishi0408 @SushrutHarness @vishal-av @pranaykaikini @prasadsatamharness -/src/components/Roadmap/data/cdData.ts @krishi0408 @SushrutHarness @vishal-av -/src/components/Docs/data/continuousDeliveryData* @krishi0408 @SushrutHarness @vishal-av +/docs/continuous-delivery/ @krishi0408 @SushrutHarness @vishal-av @pranaykaikini @prasadsatamharness @thisrohangupta +/release-notes/continuous-delivery.md @krishi0408 @SushrutHarness @vishal-av @pranaykaikini @prasadsatamharness @thisrohangupta +/src/components/Roadmap/data/cdData.ts @krishi0408 @SushrutHarness @vishal-av @thisrohangupta +/src/components/Docs/data/continuousDeliveryData* @krishi0408 @SushrutHarness @vishal-av @thisrohangupta # FirstGen CD Docs -/docs/category/continuous-delivery/ @krishi0408 @SushrutHarness @vishal-av +/docs/category/continuous-delivery/ @krishi0408 @SushrutHarness @vishal-av @thisrohangupta # CD FAQs /docs/faqs/continuous-delivery-faqs @krishi0408 @SushrutHarness @vishal-av # GitEx FAQs @@ -105,12 +105,12 @@ static/ @rohanmaharjan100 @wei-harness /src/components/Docs/data/serviceReliabilityManagementData.ts @sunilgupta-harness # Chaos Docs -/docs/chaos-engineering/ @neelanjan00 @SmritiSatya @shovanmaity @Jonsy13 @Saranya-jena @Adarshkumar14 @ispeakc0de @amityt @uditgaurav @S-ayanide @vanshBhatia-A4k9 +/docs/chaos-engineering/ @neelanjan00 @SmritiSatya @shovanmaity @Jonsy13 @Saranya-jena @Adarshkumar14 @ispeakc0de @amityt @uditgaurav @S-ayanide @vanshBhatia-A4k9 @ksatchit @umamukkara /release-notes/chaos-engineering.md @neelanjan00 @SmritiSatya @Jonsy13 /src/components/Roadmap/data/ceData.ts @krishi0408 @neelanjan00 @SmritiSatya @vishal-av @SushrutHarness /src/components/Docs/data/chaosEngineeringData* @krishi0408 @neelanjan00 @SmritiSatya @vishal-av @SushrutHarness # Chaos FAQs -/kb/chaos-engineering/chaos-engineering-faq @neelanjan00 @SmritiSatya @ksatchit +/kb/chaos-engineering/chaos-engineering-faq @neelanjan00 @SmritiSatya @ksatchit @umamukkara # Incident Response Docs /docs/incident-response/ @SmritiSatya diff --git a/.github/workflows/markdown-file-validation.yaml b/.github/workflows/markdown-and-image-file-validation.yaml similarity index 99% rename from .github/workflows/markdown-file-validation.yaml rename to .github/workflows/markdown-and-image-file-validation.yaml index a592a7911a7..877a448bc47 100644 --- a/.github/workflows/markdown-file-validation.yaml +++ b/.github/workflows/markdown-and-image-file-validation.yaml @@ -69,4 +69,4 @@ jobs: else echo -e "${Red} The check has failed.${Reset}" exit 1 - fi + fi \ No newline at end of file diff --git a/client_module/chatbot.js b/client-modules/chatbot.js similarity index 100% rename from client_module/chatbot.js rename to client-modules/chatbot.js diff --git a/client_module/iframeEmbed.js b/client-modules/iframeEmbed.js similarity index 100% rename from client_module/iframeEmbed.js rename to client-modules/iframeEmbed.js diff --git a/client-modules/searchBar.js b/client-modules/searchBar.js new file mode 100644 index 00000000000..a4d0367abab --- /dev/null +++ b/client-modules/searchBar.js @@ -0,0 +1,78 @@ +import ExecutionEnvironment from 
'@docusaurus/ExecutionEnvironment'; + +const doYourCustomStuff = () => { + const navbar__items = document.querySelector('.navbar__items'); + const navbar__sidebar__close = document.querySelector( + '.navbar-sidebar__close' + ); + const navbar__inner = document.querySelector('.navbar__inner'); + + const searchBoxMain = navbar__inner.querySelector('#coveo-search-main'); + + if (searchBoxMain) { + searchBoxMain.classList.add('main-nav-coveo'); + } + + const searchButton = document.createElement('i'); + searchButton.setAttribute('id', 'search-button'); + searchButton.setAttribute('class', 'fa-solid fa-magnifying-glass'); + + if (navbar__items) { + navbar__items.appendChild(searchButton); + if (window.location.pathname !== '/') { + const navbar__toggle = document.querySelectorAll('.navbar__toggle'); + if (navbar__toggle) { + navbar__toggle[0].addEventListener('click', () => { + const navbar__sidebar__items = document.querySelectorAll( + '.navbar-sidebar__items' + ); + if (navbar__sidebar__items[0]) { + navbar__sidebar__items[0].classList.add( + 'navbar-sidebar__items--show-secondary' + ); + } + }); + } + } + + searchButton.addEventListener('click', () => { + const navbar = document.querySelector('.navbar'); + const navbar__sidebar = document.querySelector('.navbar-sidebar'); + + if (navbar && navbar__sidebar) { + navbar.classList.add('navbar-sidebar--show'); + navbar__sidebar.classList.add('navbar-sidebar--show'); + } + + const navbar__sidebar__items = document.querySelectorAll( + '.navbar-sidebar__items' + ); + if (navbar__sidebar__items[0]) { + navbar__sidebar__items[0].classList.remove( + 'navbar-sidebar__items--show-secondary' + ); + } + }); + } + if (navbar__sidebar__close) { + navbar__sidebar__close.addEventListener('click', () => { + const navbar = document.querySelector('.navbar'); + const navbar__sidebar = document.querySelector('.navbar-sidebar'); + + if (navbar && navbar__sidebar) { + navbar.classList.remove('navbar-sidebar--show'); + navbar__sidebar.classList.remove('navbar-sidebar--show'); + } + }); + } +}; + +if (ExecutionEnvironment.canUseDOM) { + window.addEventListener('load', () => { + setInterval(doYourCustomStuff, 500); + // setTimeout(() => { + // clearInterval(interval); + // interval = 0; + // }, 2000); + }); +} diff --git a/client_module/searchBar.js b/client_module/searchBar.js deleted file mode 100644 index b3b21ccea98..00000000000 --- a/client_module/searchBar.js +++ /dev/null @@ -1,71 +0,0 @@ -import ExecutionEnvironment from "@docusaurus/ExecutionEnvironment"; - -const doYourCustomStuff = () => { - const navbar__items = document.querySelector(".navbar__items"); - const navbar__sidebar__close = document.querySelector( - ".navbar-sidebar__close" - ); - - const searchButton = document.createElement("i"); - searchButton.setAttribute("id", "search-button"); - searchButton.setAttribute("class", "fa-solid fa-magnifying-glass"); - - if (navbar__items) { - navbar__items.appendChild(searchButton); - if (window.location.pathname !== "/") { - const navbar__toggle = document.querySelectorAll(".navbar__toggle"); - if (navbar__toggle) { - navbar__toggle[0].addEventListener("click", () => { - const navbar__sidebar__items = document.querySelectorAll( - ".navbar-sidebar__items" - ); - if (navbar__sidebar__items[0]) { - navbar__sidebar__items[0].classList.add( - "navbar-sidebar__items--show-secondary" - ); - } - }); - } - } - - searchButton.addEventListener("click", () => { - const navbar = document.querySelector(".navbar"); - const navbar__sidebar = 
document.querySelector(".navbar-sidebar");
-
-      if (navbar && navbar__sidebar) {
-        navbar.classList.add("navbar-sidebar--show");
-        navbar__sidebar.classList.add("navbar-sidebar--show");
-      }
-
-      const navbar__sidebar__items = document.querySelectorAll(
-        ".navbar-sidebar__items"
-      );
-      if (navbar__sidebar__items[0]) {
-        navbar__sidebar__items[0].classList.remove(
-          "navbar-sidebar__items--show-secondary"
-        );
-      }
-    });
-  }
-  if (navbar__sidebar__close) {
-    navbar__sidebar__close.addEventListener("click", () => {
-      const navbar = document.querySelector(".navbar");
-      const navbar__sidebar = document.querySelector(".navbar-sidebar");
-
-      if (navbar && navbar__sidebar) {
-        navbar.classList.remove("navbar-sidebar--show");
-        navbar__sidebar.classList.remove("navbar-sidebar--show");
-      }
-    });
-  }
-};
-
-if (ExecutionEnvironment.canUseDOM) {
-  window.addEventListener("load", () => {
-    let interval = setInterval(doYourCustomStuff, 500);
-    setTimeout(() => {
-      clearInterval(interval);
-      interval = 0;
-    }, 2000);
-  });
-}
diff --git a/docs/artifact-registry/manage-registries/create-registry.md b/docs/artifact-registry/manage-registries/create-registry.md
index 7173bdc4cd2..ee23928c5d9 100644
--- a/docs/artifact-registry/manage-registries/create-registry.md
+++ b/docs/artifact-registry/manage-registries/create-registry.md
@@ -39,13 +39,24 @@ An **Upstream Proxy** for an **Artifact Registry** is a proxy configuration that
    - Enter the proxy Source. Either Docker Hub or a Custom source.
      - If it's a custom source, enter the Docker Remote Registry URL.
+
+   - Select the **AWS ECR** source
+   - Enter the **ECR Remote Registry URL**
+   :::info AWS ECR info
+   This will be in your AWS Elastic Container Registry (ECR) repositories, e.g. `https://{region}.console.aws.amazon.com/ecr/repositories/{public-or-private}/{repo-id}/{repo-name}?region={region}`
+   :::
+
    Enter the Helm Remote Registry URL.
+
 ---
 1. Choose your **Authentication** method.
-1. Select **Create Upstream Proxy**.
+:::note public vs private authentication
+Select `Access Key and Secret Key` for private sources and `Anonymous (No credentials required)` for public sources.
+:::
 
-After you've created your upstream proxy you will need to set it in a registry. To learn how to do so, go to [Set an upstream proxy](/docs/artifact-registry/manage-registries/configure-registry#set-an-upstream-proxy).
+1. Select **Create Upstream Proxy**.
 
+After you've created your upstream proxy, you will need to set it in a registry. To learn how to do so, go to [Set an upstream proxy](/docs/artifact-registry/manage-registries/configure-registry#set-an-upstream-proxy).
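+
+As a quick end-to-end check, you can pull an image through the registry after the proxy is set. The snippet below is a hedged sketch: `pkg.harness.io` is the registry endpoint used elsewhere in these docs, while the `<account-id>/<registry-name>/<image>:<tag>` path segments are placeholders you should replace with the values shown on your registry's details page:
+
+```bash
+# Authenticate against the Harness registry endpoint
+docker login pkg.harness.io
+
+# Pull an image that exists only upstream; the registry fetches it
+# through the configured upstream proxy and caches it locally
+docker pull pkg.harness.io/<account-id>/<registry-name>/<image>:<tag>
+```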
\ No newline at end of file
diff --git a/docs/artifact-registry/troubleshooting/_category_.json b/docs/artifact-registry/troubleshooting/_category_.json
new file mode 100644
index 00000000000..b79c3b7e8af
--- /dev/null
+++ b/docs/artifact-registry/troubleshooting/_category_.json
@@ -0,0 +1,9 @@
+{
+  "label": "Troubleshooting",
+  "collapsible": false,
+  "collapsed": true,
+  "link": { "type": "generated-index", "title": "Troubleshooting" },
+  "customProps": {},
+  "position": 60
+}
\ No newline at end of file
diff --git a/docs/artifact-registry/troubleshooting/authorization/_category_.json b/docs/artifact-registry/troubleshooting/authorization/_category_.json
new file mode 100644
index 00000000000..359b076552f
--- /dev/null
+++ b/docs/artifact-registry/troubleshooting/authorization/_category_.json
@@ -0,0 +1,8 @@
+{
+  "label": "Authorization / Authentication",
+  "collapsible": true,
+  "collapsed": false,
+  "link": { "type": "generated-index", "title": "Authorization / Authentication" },
+  "customProps": {},
+  "position": 10
+}
\ No newline at end of file
diff --git a/docs/artifact-registry/troubleshooting/authorization/auth-401-errors.md b/docs/artifact-registry/troubleshooting/authorization/auth-401-errors.md
new file mode 100644
index 00000000000..70f2de07a4c
--- /dev/null
+++ b/docs/artifact-registry/troubleshooting/authorization/auth-401-errors.md
@@ -0,0 +1,54 @@
+---
+title: Issues Pulling Images
+description: Fixing 401 Unauthorized errors when pulling images from Harness Artifact Registry.
+sidebar_position: 10
+---
+
+This document provides steps to resolve "401 Unauthorized" errors when pulling images from Harness Artifact Registry.
+
+## Understanding 401 Errors When Pulling Images
+
+401 Unauthorized errors occur when Harness Artifact Registry fails to authenticate image pull requests from the Kubernetes cluster.
+
+:::tip Cluster Secrets
+Ensure your cluster has the correct `imagePullSecrets` and Kubernetes secrets configured.
+:::
+
+### Example Solution
+
+To authenticate your Kubernetes cluster, follow these steps:
+
+1. **Create a Docker Registry Secret:**
+
+   ```bash
+   kubectl create secret docker-registry docker-registry-secret1 \
+     --docker-server=pkg.harness.io \
+     --docker-username=<username> \
+     --docker-password=<password> \
+     --docker-email=<email> \
+     --namespace=default
+   ```
+
+2. **Update your Kubernetes deployment manifest to reference the secret:**
+   ```yaml
+   apiVersion: v1
+   kind: Pod
+   metadata:
+     name: your-pod-name
+     namespace: default
+   spec:
+     containers:
+       - name: your-container-name
+         image: pkg.harness.io/your-image:tag
+     imagePullSecrets:
+       - name: docker-registry-secret1
+   ```
+
+3. **Verify the Setup:**
+
+Deploy the updated manifest and monitor the pod status to ensure the image is pulled successfully without authentication errors.
+
+---
+
+## Kubernetes-specific guides
+Go to the [Private Registry for Kubernetes guide](https://developer.harness.io/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/cd-kubernetes-category/pull-an-image-from-a-private-registry-for-kubernetes/) or [Add Container Images as Artifacts](https://developer.harness.io/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/cd-kubernetes-category/add-artifacts-for-kubernetes-deployments/) for more details on how to pull an image from a private registry, or to add container images as artifacts for Kubernetes deployments.
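+
+As a follow-up check (assuming the pod, namespace, and secret names used above), you can confirm from the cluster side where the pull is failing:
+
+```bash
+# ErrImagePull / ImagePullBackOff in the status column means auth is still failing
+kubectl get pod your-pod-name --namespace=default
+
+# The Events section shows the registry's response, e.g. "401 Unauthorized"
+kubectl describe pod your-pod-name --namespace=default
+
+# Decode the secret to confirm it targets pkg.harness.io with the expected user
+kubectl get secret docker-registry-secret1 --namespace=default \
+  -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d
+```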
\ No newline at end of file
diff --git a/docs/artifact-registry/troubleshooting/authorization/auth-403-errors.md b/docs/artifact-registry/troubleshooting/authorization/auth-403-errors.md
new file mode 100644
index 00000000000..5fc88731c8c
--- /dev/null
+++ b/docs/artifact-registry/troubleshooting/authorization/auth-403-errors.md
@@ -0,0 +1,16 @@
+---
+title: 403 Errors in Pipeline Execution
+description: Troubleshooting common 403 errors during pipeline execution in Harness Artifact Registry.
+sidebar_position: 20
+---
+
+This document helps you troubleshoot 403 errors encountered during pipeline execution.
+
+## Understanding 403 Errors in Pipeline Execution
+403 errors typically indicate a permissions or access-related issue in your pipeline setup. They may arise due to configuration changes or mismatched access credentials. Go to [role-based access control (RBAC)](https://developer.harness.io/docs/platform/role-based-access-control/rbac-in-harness) to find out how to manage pipeline permissions.
+
+:::tip Account-level access
+Check if any recent changes to account-level flags or permissions may have impacted the pipeline.
+:::
+
+Go to the [permission reference](https://developer.harness.io/docs/platform/role-based-access-control/permissions-reference/) to review the specific permissions required for your pipeline execution.
\ No newline at end of file
diff --git a/docs/chaos-engineering/integrations/pipeline-exec.md b/docs/chaos-engineering/integrations/pipeline-exec.md
index d17ed41cb48..f808e3084d8 100644
--- a/docs/chaos-engineering/integrations/pipeline-exec.md
+++ b/docs/chaos-engineering/integrations/pipeline-exec.md
@@ -34,7 +34,7 @@ To execute an experiment in a pipeline, you need to have the appropriate permiss
 
 ### Integrate Experiment Pipeline with Notification
 
-You can create a chaos experiment as a pipeline and integrate it to receive notifications about the status of the experiment. Go to [create experiment as a pipeline](/docs/chaos-engineering/use-harness-ce/experiments/create-experiments#create-experiment-as-a-pipeline) to get hands-on experience of creating a chaos experiment as a pipeline.
+You can create a chaos experiment (on **Kubernetes dedicated infrastructure**, **Harness Delegate**, **Linux**, or **Windows infrastructure**) as a pipeline and integrate it to receive notifications about the status of the experiment. Go to [create experiment as a pipeline](/docs/chaos-engineering/use-harness-ce/experiments/create-experiments#create-experiment-as-a-pipeline) to create a chaos experiment as a pipeline.
 
 For more information, go to [Pipeline concepts](/docs/continuous-integration/get-started/key-concepts) and [Pipeline Modeling Overview](/docs/continuous-delivery/get-started/cd-pipeline-modeling-overview).
 
diff --git a/docs/chaos-engineering/use-harness-ce/application-map.md b/docs/chaos-engineering/use-harness-ce/application-map.md
index 46fff4e83f6..62d48816484 100644
--- a/docs/chaos-engineering/use-harness-ce/application-map.md
+++ b/docs/chaos-engineering/use-harness-ce/application-map.md
@@ -63,10 +63,11 @@ You can [create](#create-application-map), [edit](#edit-application-map), and [d
 
 ![](./static/app-maps/save-nw-5.png)
 
 :::info note
-To view chaos-enabled experiment map, navigate to **Chaos** module and select **Application Maps**.
+- To view the chaos-enabled experiment map, navigate to the **Chaos** module and select **Application Maps**.
 ![](./static/app-maps/create-nw-1.png)
+- To manually associate the experiment as part of an application map, specify the tag `applicationmap=<application-map-name>` in the experiment.
 :::
 
 ### Edit application map
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/aws.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/aws.md
index 2cced246f56..14ba72b381d 100644
--- a/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/aws.md
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/aws.md
@@ -786,6 +786,21 @@ Lambda delete event source mapping removes the event source mapping from an AWS
 
+### Lambda function layer detach
+
+Lambda function layer detach is an AWS fault that detaches the Lambda layer associated with the function, thereby causing dependency-related issues or breaking the Lambda function that relies on the layer's content.
+
+<details>
+<summary>Use cases</summary>
+
+- Debug runtime errors caused by a specific library in the layer.
+- Test how the Lambda function behaves without the dependencies provided by the layer, thereby identifying unnecessary dependencies and reducing the layer's footprint.
+
+</details>
+
 ### Lambda delete function concurrency
 
 Lambda delete function concurrency deletes the Lambda function's reserved concurrency, thereby ensuring that the function has adequate unreserved concurrency to run.
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/experiments.ts b/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/experiments.ts
index e7aced86ca1..14dea892468 100644
--- a/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/experiments.ts
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/experiments.ts
@@ -309,6 +309,13 @@ export const experiments: ExperimentDetails[] = [
     tags: ['lambda', 'delete', 'concurrency'],
     category: "aws",
   },
+  {
+    name: "Lambda function layer detach",
+    description:
+      "Lambda function layer detach is an AWS fault that detaches the Lambda layer associated with the function, thereby causing dependency-related issues or breaking the Lambda function that relies on the layer's content.",
+    tags: ['lambda', 'detach', 'function'],
+    category: "aws",
+  },
   {
     name: "Lambda toggle event mapping state",
     description:
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/lambda-function-layer-detach.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/lambda-function-layer-detach.md
new file mode 100644
index 00000000000..701b9b5e8ac
--- /dev/null
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/lambda-function-layer-detach.md
@@ -0,0 +1,116 @@
+---
+id: lambda-function-layer-detach
+title: Lambda function layer detach
+---
+
+Lambda function layer detach is an AWS fault that detaches the Lambda layer associated with the function, thereby causing dependency-related issues or breaking the Lambda function that relies on the layer's content.
+
+![Lambda Function Layer Detach](./static/images/lambda-function-layer-detach.png)
+
+## Use cases
+Lambda function layer detach:
+- Debug runtime errors caused by a specific library in the layer.
+- Test how the Lambda function behaves without the dependencies provided by the layer, thereby identifying unnecessary dependencies and reducing the layer's footprint.
+
+### Prerequisites
+- Kubernetes >= 1.17
+- The Lambda layer must be attached to the target Lambda function.
+- Lambda function must be up and running.
+- Kubernetes secret must have the AWS access configuration (key) in the `CHAOS_NAMESPACE`.
+Below is a sample secret file:
+  ```yaml
+  apiVersion: v1
+  kind: Secret
+  metadata:
+    name: cloud-secret
+  type: Opaque
+  stringData:
+    cloud_config.yml: |-
+      # Add the cloud AWS credentials respectively
+      [default]
+      aws_access_key_id = XXXXXXXXXXXXXXXXXXX
+      aws_secret_access_key = XXXXXXXXXXXXXXX
+  ```
+
+:::tip
+Harness CE recommends that you use the same secret name, that is, `cloud-secret`. Otherwise, you will need to update the `AWS_SHARED_CREDENTIALS_FILE` environment variable in the fault template with the new secret name, and you won't be able to use the default health check probes.
+:::
+
+Below is an example AWS policy to execute the fault.
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "lambda:GetFunction",
+        "lambda:UpdateFunctionConfiguration",
+        "lambda:DeleteLayerVersion",
+        "lambda:GetLayerVersion",
+        "lambda:ListLayerVersions"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
+```
+
+:::info note
+- Go to [AWS named profile for chaos](/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/security-configurations/aws-switch-profile) to use a different profile for AWS faults.
+- Go to the [superset permission/policy](/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/security-configurations/policy-for-all-aws-faults) to execute all AWS faults.
+- Go to [common tunables](/docs/chaos-engineering/use-harness-ce/chaos-faults/common-tunables-for-all-faults) and [AWS-specific tunables](/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/aws-fault-tunables) to tune the common tunables for all faults and AWS-specific tunables.
+:::
+
+### Mandatory tunables
+
+| Tunable | Description | Notes |
+| ------- | ----------- | ----- |
+| FUNCTION_NAME | Name of the target Lambda function. | It supports a single function name. For example, `test-function`. |
+| LAMBDA_FUNCTION_LAYER_ARN | Layer version that is identified by the unique ARN. | Supports comma-separated ARNs. For example, `arn:aws:lambda:us-east-2:99999999999:layer:l-2:1,arn:aws:lambda:us-east-2:99999999999:layer:chaos_layer:1`. |
+| REGION | Region name of the target Lambda function. | For example, `us-east-2`. |
+
+### Optional tunables
+
+| Tunable | Description | Notes |
+| ------- | ----------- | ----- |
+| TOTAL_CHAOS_DURATION | Duration that you specify, through which chaos is injected into the target resource (in seconds). | Default: 30 s. For more information, go to duration of the chaos. |
+| AWS_SHARED_CREDENTIALS_FILE | Path to the AWS secret credentials. | Default: `/tmp/cloud_config.yml`. |
+| CHAOS_INTERVAL | Time interval between two successive layer detachments (in seconds). | Default: 30 s. For more information, go to chaos interval. |
+| RAMP_TIME | Period to wait before and after injecting chaos (in seconds). | For example, 30 s. For more information, go to ramp time. |
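+
+### Example fault definition
+
+The snippet below is a sketch of how these tunables can be wired together. It assumes the `ChaosEngine` env-list layout used by other fault pages in this section (for example, the pod DNS faults), and the function name, layer ARN, and region are illustrative values taken from the tables above; substitute your own values:
+
+```yaml
+# sketch only: pass the tunables to the fault as environment variables
+experiments:
+  - name: lambda-function-layer-detach
+    spec:
+      components:
+        env:
+          # single target function name
+          - name: FUNCTION_NAME
+            value: 'test-function'
+          # layer version ARN(s) to detach, comma-separated
+          - name: LAMBDA_FUNCTION_LAYER_ARN
+            value: 'arn:aws:lambda:us-east-2:99999999999:layer:chaos_layer:1'
+          # region that hosts the target function
+          - name: REGION
+            value: 'us-east-2'
+          - name: TOTAL_CHAOS_DURATION
+            value: '30'
+```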
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/static/images/lambda-function-layer-detach.png b/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/static/images/lambda-function-layer-detach.png
new file mode 100644
index 00000000000..bfb329bca56
Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/static/images/lambda-function-layer-detach.png differ
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/byoc/sql-failover.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/byoc/sql-failover.md
new file mode 100644
index 00000000000..80f3a2029d6
--- /dev/null
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/byoc/sql-failover.md
@@ -0,0 +1,259 @@
+---
+id: cloud-sql-failover-using-byoc
+title: Cross-Project Cloud SQL Failover Using BYOC Injector
+---
+
+## Overview
+
+This topic describes how to implement a cross-project failover for Google Cloud SQL instances using a Bring Your Own Chaos (BYOC) injector. The [BYOC injector](/docs/chaos-engineering/use-harness-ce/chaos-faults/byoc/) facilitates the simulation of failover scenarios to test the resilience and high availability of Cloud SQL instances across different projects.
+
+---
+
+## Prerequisites
+
+Before proceeding, ensure the following prerequisites are met:
+
+- Kubernetes > 1.16
+- The service account should have editor (or owner) access to the GCP project.
+- The `litmus-admin` Kubernetes secret should have the appropriate permissions to perform a Cloud SQL failover:
+  - cloudsql.instances.failover
+  - cloudsql.instances.list
+- Harness CE provides two ways to grant permissions to the `litmus-admin` Kubernetes secret:
+  - [Using Gcloud Service Account Secret](/docs/chaos-engineering/use-harness-ce/chaos-faults/gcp/security-configurations/prepare-secret-for-gcp/)
+  - [Using Workload Identity](/docs/chaos-engineering/use-harness-ce/chaos-faults/gcp/gcp-iam-integration/)
+
+---
+
+## Mandatory tunables
+
+| Tunable | Description | Notes |
+| ------- | ----------- | ----- |
+| LIB_IMAGE | Image of the helper pod that contains the business logic for the custom fault. | For more information, go to lib image. |
+| COMMAND | Command to execute in the helper pod. | For more information, go to command. |
+| ARGS | Arguments to execute in the helper pod. | For more information, go to args. |
+
+## Implementation
+
+You can use one of the following ways to initiate failover on cloud SQL instances:
+
+### Using GCP Rest APIs
+
+Google Cloud Platform provides APIs for all its services, allowing users and developers to interact with them programmatically. This flexibility is crucial for specifying the `LIB_IMAGE` required for chaos injection, particularly when using BYOC. To implement this method, provide scripts in your preferred programming language and build a custom image. For more details, refer to the [documentation](https://cloud.google.com/sql/docs/mysql/admin-api/rest).
+
+### Using gcloud Binary
+
+The `gcloud` binary offers a range of commands designed for interacting with the Cloud SQL Service, including operations like listing, updating, deleting, and initiating failover. Refer to the [documentation](https://cloud.google.com/sdk/gcloud/reference/sql/instances) for more information.
+
+This method is advantageous since it caters to individuals with varying levels of technical expertise, even those without extensive knowledge of coding languages or APIs. By leveraging a combination of `LIB_IMAGE` (a Docker image containing `gcloud`) and `ARGS` to concatenate the necessary commands into a single directive, you can seamlessly implement this approach.
+
+The choice of method depends on your preference. In this documentation, you will learn the second approach, that is, the `gcloud` binary.
+
+You will formulate a unified command for chaos injection, which can be specified within the `ARGS` tunable, alongside any image equipped with `gcloud`.
+
+#### Construct the Command for Cloud SQL Failover
+
+GCP provides a specific command for SQL failover, which requires two inputs (environment variables) and can be executed as follows:
+
+```bash
+gcloud sql instances failover "${CLOUD_SQL_INSTANCE_NAME}" --project="${CLOUD_SQL_PROJECT}" -q
+```
+
+where
+
+- `CLOUD_SQL_INSTANCE_NAME`: is the name of the designated SQL Instance.
+- `CLOUD_SQL_PROJECT`: is the name of the GCP Project in which the SQL instance is located.
+
+To confirm the occurrence of chaos injection, verify the zone of the SQL instance before and after the chaos injection. This verification is crucial as the zone would change due to the chaos injection. To achieve this, use the following command:
+
+```bash
+gcloud sql instances describe "${CLOUD_SQL_INSTANCE_NAME}" --project "${CLOUD_SQL_PROJECT}" --format="get(gceZone)"
+```
+
+The command above provides a detailed description of the specified SQL instance and displays the zone of the specific instance as a single output, which simplifies the output log for easy analysis.
+
+The next step is to integrate the commands described earlier, in a manner that allows you to do the following:
+- Retrieve the zone of the target SQL instance before the chaos injection,
+- Display this information in the logs,
+- Initiate a SQL failover,
+- Retrieve the zone of the SQL instance again, and
+- Display this updated information in the logs.
+
+By following this approach, you can eliminate the need for manual verification through the GCP Console to observe the zone switch before and after the chaos injection.
The combined command is:
+
+```bash
+before_zone=$(gcloud sql instances describe "${CLOUD_SQL_INSTANCE_NAME}" \
+  --project "${CLOUD_SQL_PROJECT}" --format="get(gceZone)") &&
+echo -e "Zone for the primary replica before failover ${before_zone}\n" &&
+gcloud sql instances failover "${CLOUD_SQL_INSTANCE_NAME}" --project="${CLOUD_SQL_PROJECT}" -q &&
+after_zone=$(gcloud sql instances describe "${CLOUD_SQL_INSTANCE_NAME}" \
+  --project "${CLOUD_SQL_PROJECT}" --format="get(gceZone)") &&
+echo -e "\nZone for the primary replica after failover ${after_zone}"
+```
+
+To execute this command within chaos injection for real SQL failover scenarios, incorporate it within the `ARGS` tunable, as:
+
+```yaml
+- name: ARGS
+  value: before_zone=$(gcloud sql instances describe "${CLOUD_SQL_INSTANCE_NAME}"
+    --project "${CLOUD_SQL_PROJECT}" --format="get(gceZone)") &&
+    echo -e "Zone for the primary replica before failover
+    ${before_zone}\n" && gcloud sql instances failover
+    "${CLOUD_SQL_INSTANCE_NAME}" --project="${CLOUD_SQL_PROJECT}" -q
+    && after_zone=$(gcloud sql instances describe
+    "${CLOUD_SQL_INSTANCE_NAME}" --project "${CLOUD_SQL_PROJECT}"
+    --format="get(gceZone)") && echo -e "\nZone for the primary
+    replica after failover ${after_zone}"
+```
+
+#### Helper Pod Image - LIB_IMAGE
+
+To execute this command successfully, specify an image for the `LIB_IMAGE` tunable. Considering that `harness/chaos-go-runner:main-latest` (a generic image utilized by Harness CE) already includes the `gcloud` binary, you can utilize the same image here.
+
+```yaml
+- name: LIB_IMAGE
+  value: docker.io/harness/chaos-go-runner:main-latest
+```
+
+#### Command to Execute in Helper Pod
+
+The input to the `COMMAND` tunable depends on the image used in the `LIB_IMAGE` tunable.
+
+- For a shell-script-compatible image, the input would be `/bin/sh, -c`.
+- For a bash-script-compatible image, the input would be `/bin/bash, -c`.
+- For the `harness/chaos-go-runner:main-latest` image, which supports bash scripts, the input would be `/bin/bash, -c`.
+
+:::tip
+During experiment execution, the helper pod logs help you understand the following:
+- Zone of the primary replica before SQL failover
+- Chaos injection in progress
+- Zone of the primary replica after SQL failover
+
+During experiment execution, the experiment pod logs help you understand the following:
+- Status checks that occur before and after chaos
+- Chaos injection in progress
+:::
+
+### Check Status - Resilience Probes
+
+Create [resilience probes](/docs/chaos-engineering/use-harness-ce/probes/) that help conduct status checks to verify the health of the infrastructure or application and its readiness to endure chaos injection. You can configure these status checks at different stages of the chaos experiment, such as Start of Test (SOT), End of Test (EOT), OnChaos, Continuous, and Edge (Start & End) of Chaos Injection.
+
+:::tip
+For this example, you can create a [command probe](/docs/chaos-engineering/use-harness-ce/probes/command-probe/) in the "Edge" mode, as it allows you to verify the status of the SQL instance before and after chaos injection, ensuring that the SQL instance remains operational and healthy.
+:::
+
+This validation process is executed through the GCloud Console. Solely relying on manual checks via the console may not be the most efficient method.
+
+You can use the `gcloud` command to fetch the status of the designated SQL instance by using the following command:
+
+```bash
+gcloud sql instances describe "${CLOUD_SQL_INSTANCE_NAME}" --project "${CLOUD_SQL_PROJECT}" --format="get(state)"
+```
+
+### Cross-Project IAM Setup - Workload Identity
+
+You can use Workload Identity to configure a Google Cloud Platform (GCP) service account so that `litmus-admin` can use it to conduct chaos experiments in other GCP projects while still upholding control over permissions and access to resources.
+
+Suppose the chaos infrastructure is located in `Project A`, and the target SQL instance is located in `Project B`. Let us call the service account to be used `SA`.
+
+Create the service account `SA` in `Project A`, link it to the Kubernetes service account `litmus-admin`, and execute the Workload Identity mapping, as specified in the [Harness CE documentation](/docs/chaos-engineering/use-harness-ce/chaos-faults/gcp/gcp-iam-integration/).
+Once the service account mapping is in place, the next step is to grant the relevant permissions to the service account in other projects. You can accomplish this by designating the same service account as a `PRINCIPAL` in `Project B` and assigning a role with the necessary permissions as mentioned in the [prerequisites](#prerequisites).
+
+  ![cross-project setup](./static/images/cross-proj-setup.png)
+
+Based on these changes, the manifest would look like the one below:
+
+```yaml
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+  annotations:
+    probeRef: '[{"mode":"Edge","probeID":"cloud-sql-healthcheck"}]'
+  creationTimestamp: null
+  generateName: byoc-injector-mv0
+  labels:
+    app.kubernetes.io/component: experiment-job
+    app.kubernetes.io/part-of: litmus
+    app.kubernetes.io/version: ci
+    name: byoc-injector
+    workflow_name: cloud-sql-instance-failover
+    workflow_run_id: "{{ workflow.uid }}"
+  namespace: "{{workflow.parameters.adminModeNamespace}}"
+spec:
+  appinfo: {}
+  chaosServiceAccount: litmus-admin
+  components:
+    runner:
+      nodeSelector:
+        iam.gke.io/gke-metadata-server-enabled: "true"
+      resources: {}
+  engineState: active
+  experiments:
+    - args:
+        - -c
+        - ./experiments -name byoc-injector
+      command:
+        - /bin/bash
+      image: docker.io/harness/chaos-go-runner:main-latest
+      imagePullPolicy: Always
+      name: byoc-injector
+      spec:
+        components:
+          env:
+            - name: TOTAL_CHAOS_DURATION
+              value: "120"
+            - name: LIB_IMAGE
+              value: docker.io/harness/chaos-go-runner:main-latest
+            - name: COMMAND
+              value: /bin/bash, -c
+            - name: CLOUD_SQL_INSTANCE_NAME
+              value: ""
+            - name: CLOUD_SQL_PROJECT
+              value: ""
+            - name: ARGS
+              value: before_zone=$(gcloud sql instances describe "${CLOUD_SQL_INSTANCE_NAME}"
+                --project "${CLOUD_SQL_PROJECT}" --format="get(gceZone)") &&
+                echo -e "Zone for the primary replica before failover
+                ${before_zone}\n" && gcloud sql instances failover
+                "${CLOUD_SQL_INSTANCE_NAME}" --project="${CLOUD_SQL_PROJECT}" -q
+                && after_zone=$(gcloud sql instances describe
+                "${CLOUD_SQL_INSTANCE_NAME}" --project "${CLOUD_SQL_PROJECT}"
+                --format="get(gceZone)") && echo -e "\nZone for the primary
+                replica after failover ${after_zone}"
+          nodeSelector:
+            iam.gke.io/gke-metadata-server-enabled: "true"
+          resources: {}
+          securityContext:
+            containerSecurityContext: {}
+            podSecurityContext:
+              runAsGroup: 0
+              runAsUser: 2000
+          statusCheckTimeouts: {}
+      rank: 0
+  jobCleanUpPolicy: delete
+  terminationGracePeriodSeconds: 30
+status:
+  engineStatus: ""
+  experiments: null
+```
\ No newline at end of file
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/byoc/static/images/cross-proj-setup.png b/docs/chaos-engineering/use-harness-ce/chaos-faults/byoc/static/images/cross-proj-setup.png
new file mode 100644
index 00000000000..033bb8a77c3
Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/chaos-faults/byoc/static/images/cross-proj-setup.png differ
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/comparison-ec2.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/comparison-ec2.md
new file mode 100644
index 00000000000..e58ec4dd9d7
--- /dev/null
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/comparison-ec2.md
@@ -0,0 +1,110 @@
+---
+title: Comparison of EC2 Chaos Approach for Kubernetes versus Linux
+sidebar_position: 4
+description: Comparison of EC2 Chaos Approach for (Kubernetes + SSM) versus Native Linux agent.
+---
+
+This topic compares the EC2 chaos injection approach for Kubernetes+SSM with that of the native Linux agent.
+
+<table>
+  <tr>
+    <th>Area</th>
+    <th>Kubernetes agent driven EC2 chaos</th>
+    <th>Native Linux agent driven EC2 chaos</th>
+  </tr>
+  <tr>
+    <td>Install Prerequisites/Agent Setup</td>
+    <td>
+      <ol>
+        <li>Installation of the agent needs the user to be a cluster-admin OR mapped to a cluster role with these permissions.</li>
+        <li>SSM Agent is installed (it runs with sudo by default) on the target EC2 instance(s).</li>
+        <li>Default SSM IAM role should be attached to the target EC2 instance(s).</li>
+        <li>Ensure that you either create a secret with account user credentials or map an appropriate IAM role reference/ARN to the chaos ServiceAccount to carry out the chaos injection.</li>
+      </ol>
+    </td>
+    <td>Console access to the machine as root/sudo OR the ability to inject processes remotely over SSH as root/sudo.</td>
+  </tr>
+  <tr>
+    <td>Installed Components</td>
+    <td>The K8s agent comprises the following stateless deployments in a dedicated namespace: subscriber, wf-controller, chaos-operator, and exporter, along with some secrets and ConfigMaps.</td>
+    <td>The native Linux chaos agent comprises a systemd-based service (configured with a post hook). The agent config, logs, and cron configuration are stored in dedicated, predefined paths.</td>
+  </tr>
+  <tr>
+    <td>Dependencies (a combination of upstream Linux and Harness utilities required for chaos injection)</td>
+    <td>
+      <ul>
+        <li>They can be installed just-in-time by the experiment OR can be placed into the machine prior (in case of disconnected setups).</li>
+        <li>tc, stress-ng, jq, iproute2, tproxy, dns-interceptor</li>
+      </ul>
+    </td>
+    <td>
+      <ul>
+        <li>Installed as part of the agent installation process.</li>
+        <li>tc, stress-ng, jq, iproute2, tproxy, byteman</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td>Network Connectivity</td>
+    <td>
+      From Chaos Agent:
+      <ol>
+        <li>Outbound over port 443 to Harness from the Kubernetes cluster.</li>
+        <li>Outbound over port 443 to cloud account resource endpoints from the Kubernetes cluster.</li>
+        <li>Outbound to application health endpoints (ones which will be used for resilience validation) from the Kubernetes cluster.</li>
+        <li>From EC2 Instance: Outbound over port 443 to package repo/Harness S3 endpoints to pull dependencies (in connected mode).</li>
+      </ol>
+    </td>
+    <td>
+      <ol>
+        <li>Outbound over port 443 to Harness from the VM.</li>
+        <li>Outbound over port 443 to package repo/Harness S3 endpoints to pull dependencies (in connected mode).</li>
+        <li>Outbound to application health endpoints (ones which will be used for resilience validation) from the VM.</li>
+      </ol>
+    </td>
+  </tr>
+  <tr>
+    <td>Lifecycle Management</td>
+    <td>
+      <ul>
+        <li><strong>Availability:</strong> Tracked via heartbeat. Can be scaled down to 0 replicas under idle conditions.</li>
+        <li><strong>Upgrade:</strong> Automatic and manual upgrades supported.</li>
+        <li><strong>Note:</strong> Automated upgrades only via Kubernetes manifests. Helm bundle upgrades are manual/offline.</li>
+        <li><strong>Uninstall/Deletion:</strong> The "Disconnect" operation from the control plane removes the subscriber and the configs/secrets involved in auth.</li>
+      </ul>
+    </td>
+    <td>
+      <ul>
+        <li><strong>Availability:</strong> Tracked via heartbeat. The service can be stopped under idle conditions.</li>
+        <li><strong>Upgrade:</strong> Only manual upgrades supported.</li>
+        <li><strong>Uninstall/Deletion:</strong> Performed via an offline uninstaller utility.</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td>Permissions/Access for Chaos Injection</td>
+    <td>Depends upon the nature of the fault. Master Policy for EC2 faults for all supported faults on EC2.</td>
+    <td>Run experiments with the root user.</td>
+  </tr>
+  <tr>
+    <td>Chaos Experiment Execution</td>
+    <td>
+      <ul>
+        <li><strong>Max Execution Time:</strong> Chaos Duration + Probe Validation Timeout + [~60-120s] (Relatively Higher)</li>
+        <li><strong>Note:</strong> Involves generation of K8s events and creation of transient pods to carry out the fault business logic, which can add to overall execution time.</li>
+        <li><strong>Parallel Fault Support Within Experiment:</strong> Yes</li>
+        <li><strong>Multi-Infra Support Within Experiment:</strong> No</li>
+        <li><strong>Support for HTTP Probes:</strong> Yes</li>
+        <li><strong>Support for Command Probes in Source Mode (custom validation via user-defined container images):</strong> Yes</li>
+      </ul>
+    </td>
+    <td>
+      <ul>
+        <li><strong>Max Execution Time:</strong> Chaos Duration + Probe Validation Timeout (Relatively Lower)</li>
+        <li><strong>Parallel Fault Support Within Experiment:</strong> Yes</li>
+        <li><strong>Multi-Infra Support Within Experiment:</strong> Yes</li>
+        <li><strong>Support for HTTP Probes:</strong> Yes</li>
+        <li><strong>Support for Command Probes in Source Mode (custom validation via user-defined container images):</strong> No</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td>Execution Control</td>
+    <td>
+      <ul>
+        <li><strong>Abort Support:</strong> Yes. Internally invokes cancellation of the SSM command (which in turn is a bash script). However, there are some risks of continued operations as highlighted by AWS.</li>
+        <li><strong>SSM Agent Crash:</strong> Dependent on AWS-native recovery.</li>
+      </ul>
+    </td>
+    <td>
+      <ul>
+        <li><strong>Abort Support:</strong> Yes. An abort-watcher ensures graceful cancellation of the chaos process.</li>
+        <li><strong>Chaos Agent Crash:</strong> The agent service is configured with the right hooks (ExecStart/Stop), which remove all residual chaos on the system as a safety measure.</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td>Logs</td>
+    <td>Logs are based off the success of the SSM commands, with a need to explicitly fetch the stdout/stderr.</td>
+    <td>Custom logs tracking each stage of the fault injection are available.</td>
+  </tr>
+  <tr>
+    <td>OS-Specific Fault Coverage</td>
+    <td>Not available</td>
+    <td>Available</td>
+  </tr>
+  <tr>
+    <td>Custom Chaos Support (SSH, Load)</td>
+    <td>Available</td>
+    <td>Not available</td>
+  </tr>
+  <tr>
+    <td>APM Integrations for Probes</td>
+    <td>Supports Prometheus, Dynatrace, Datadog, and New Relic out of the box.</td>
+    <td>Dynatrace and Datadog supported out of the box. Others can be implemented using custom/command probes.</td>
+  </tr>
+  <tr>
+    <td>Harness Chaos Management Feature Support (Cron, ChaosGuard, Gamedays, CD Integration)</td>
+    <td>Available</td>
+    <td>Gameday support available</td>
+  </tr>
+  <tr>
+    <td>Agent Reuse for Managed Service Chaos</td>
+    <td>Supported</td>
+    <td>Not available</td>
+  </tr>
+</table>
\ No newline at end of file
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/experiments.ts b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/experiments.ts
index 8dba44ef52b..a98ab6fafe6 100644
--- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/experiments.ts
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/experiments.ts
@@ -227,7 +227,19 @@ export const experiments: ExperimentDetails[] = [
     name: "Pod JVM trigger gc",
     description:"Pod JVM trigger gc triggers the garbage collector for a Java process executing in a Kubernetes pod. This causes unused (or out of scope) objects and variables to be garbage collected and recycled, thereby freeing up memory space.",
     tags: ["pod", "jvm"],
-    category: "kubernetes",
+    category: "kubernetes",
+  },
+  {
+    name: "Pod JVM SQL Exception",
+    description:"Pod JVM SQL Exception injects exceptions into SQL queries for a Java application executing in a Kubernetes pod.",
+    tags: ["pod", "jvm", "sql", "exception"],
+    category: "kubernetes",
+  },
+  {
+    name: "Pod JVM SQL Latency",
+    description:"Pod JVM SQL Latency injects latency into SQL queries for a Java application executing in a Kubernetes pod.",
+    tags: ["pod", "jvm", "sql", "latency"],
+    category: "kubernetes",
   },
   {
     name: "Pod memory hog exec",
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/kubernetes.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/kubernetes.md
index c142eb9b949..258fe875e55 100644
--- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/kubernetes.md
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/kubernetes.md
@@ -625,6 +625,38 @@ Pod JVM trigger gc triggers the garbage collector for a Java process executing i
 
+### Pod JVM SQL exception
+
+Pod JVM SQL exception injects chaos into a Java application executing in a Kubernetes pod by invoking an SQL exception.
+
+<details>
+<summary>Use cases</summary>
+
+- Determines the performance bottlenecks of the application.
+- Simulates SQL exceptions to check whether the application can recover gracefully.
+- Assesses whether the monitoring systems and alerting mechanisms can accurately detect and report SQL query exceptions in real time.
+- Determines the performance and resilience of the dependent application (or services) running on Kubernetes.
+
+</details>
+
+### Pod JVM SQL latency
+
+Pod JVM SQL latency injects chaos into a Java application executing in a Kubernetes pod by injecting latency into SQL queries.
+
+<details>
+<summary>Use cases</summary>
+
+- Determines the performance bottlenecks of the application.
+- Simulates database latency to evaluate how the application handles slower SQL queries.
+- Tests the impact of SQL query latency on the end-user experience, ensuring the application behaves gracefully under slower response times.
+- Determines the performance and resilience of the dependent application (or services) running on Kubernetes.
+
+</details>
+
 ### Pod memory hog exec
 
 Pod memory hog exec is a Kubernetes pod-level chaos fault that consumes memory resources on the application container in megabytes.
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-block.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-block.md index 2da35943379..90f9b8deaef 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-block.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-block.md @@ -16,6 +16,8 @@ Pod API block is a Kubernetes pod-level chaos fault that blocks the API requests ![Pod API Block](./static/images/pod-api-block.png) +[This](https://youtu.be/Cg5gbfFrJQs?si=3CDCr5kZ1TweWuSo) video provides a step-by-step walkthrough of the execution process for the Pod API Block experiment. + ## Use cases Pod API block: - Validates how well your system can handle disruptions in API services for a specific pod. diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-latency.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-latency.md index 5281109140a..52ea3810570 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-latency.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-latency.md @@ -16,6 +16,9 @@ Pod API latency is a Kubernetes pod-level chaos fault that injects API request a ![Pod API Latency](./static/images/pod-api-latency.png) +[This](https://youtu.be/HI7sk7I13WU?si=JZMznTtzq2VRy4NC) video provides a step-by-step walkthrough of the execution process for the Pod API Latency experiment. + + ## Use cases Pod API latency: - Simulate high traffic scenarios and testing the resilience and performance of an application or API, where the API may experience delays due to heavy load. diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-modify-body.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-modify-body.md index f218949d7e9..6f2af85d902 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-modify-body.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-modify-body.md @@ -17,6 +17,8 @@ Pod API modify body is a Kubernetes pod-level chaos fault that modifies the api ![Pod API Modify Body](./static/images/pod-api-modify-body.png) +[This](https://youtu.be/Dbr_KwfTxps?si=6uWUShJuht7LoY5r) video provides a step-by-step walkthrough of the execution process for the Pod API Modify Body experiment. + ## Use cases Pod API modify body: - It can be used for API testing, by replacing specific portions of the request or response body to simulate different scenarios and validate how your application handles different data variations. diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-modify-header.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-modify-header.md index 140eb108f34..cf73ee5d633 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-modify-header.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-modify-header.md @@ -17,7 +17,10 @@ Pod API modify header is a Kubernetes pod-level chaos fault that overrides the h ![Pod API Modify Header](./static/images/pod-api-modify-header.png) +[This](https://youtu.be/sIkUxtnQY_o?si=LfwHi2rI559xyRr4) video provides a step-by-step walkthrough of the execution process for the Pod API Modify Header experiment. 
+
 ## Use cases
+
 Pod API modify header:
 - Simulate different authentication states or test the behavior of your application when using invalid or expired credentials.
 - This fault can be utilized to validate the caching behavior of your API or client applications. By overriding cache-related headers, such as the "Cache-Control" or "ETag" headers, you can simulate cache validation scenarios.
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-status-code.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-status-code.md
index 3a395e81408..86f612a79e3 100644
--- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-status-code.md
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-api-status-code.md
@@ -16,7 +16,7 @@ Pod API status code is a Kubernetes pod-level chaos fault that changes the API response status code
 
 ![Pod API Status Code](./static/images/pod-api-status-code.png)
 
-For a video tutorial, go to [pod API status code](https://youtu.be/h_lnNCG-j2o) that walks through the execution of the experiment.
+[This](https://youtu.be/h_lnNCG-j2o) video provides a step-by-step walkthrough of the execution process for the Pod API Status Code experiment.
 
 ## Use cases
 Pod API status code:
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-dns-error.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-dns-error.md
index 23c545c096f..8b0e0d0e57d 100644
--- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-dns-error.md
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-dns-error.md
@@ -99,6 +99,11 @@ permissions:
     <td> Determines whether the DNS query exactly matches one of the targets or uses any of the targets as a substring. It can be exact or substring. If this is not provided, it is set to exact. For more information, go to match scheme. </td>
   </tr>
+  <tr>
+    <td> TRANSACTION_PERCENTAGE </td>
+    <td> Percentage of the DNS queries to be affected. </td>
+    <td> It supports values in the range (0,100]. It targets all queries if not provided. For more information, go to transaction percentage. </td>
+  </tr>
   <tr>
     <td> PODS_AFFECTED_PERC </td>
     <td> Percentage of total pods to target. Provide numeric values. </td>
@@ -226,6 +231,40 @@ spec:
             value: '60'
 ```
 
+### Transaction Percentage
+
+The percentage of DNS queries that should be affected, with supported values in the range (0, 100]. If not specified, the fault targets all queries. Tune it by using the `TRANSACTION_PERCENTAGE` environment variable.
+
+The following YAML snippet illustrates the use of this environment variable:
+
+[embedmd]:# (./static/manifests/pod-dns-error/transaction-percentage.yaml yaml)
+```yaml
+# contains transaction percentage for the dns error
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+  name: engine-nginx
+spec:
+  engineState: "active"
+  annotationCheck: "false"
+  appinfo:
+    appns: "default"
+    applabel: "app=nginx"
+    appkind: "deployment"
+  chaosServiceAccount: litmus-admin
+  experiments:
+    - name: pod-dns-error
+      spec:
+        components:
+          env:
+            # provide the transaction percentage within (0,100] range
+            # for example, it will affect 50% of the total dns queries
+            - name: TRANSACTION_PERCENTAGE
+              value: '50'
+            - name: TOTAL_CHAOS_DURATION
+              value: '60'
+```
+
 ### Container runtime and socket path
 
 Use the `CONTAINER_RUNTIME` and `SOCKET_PATH` environment variables to set the container runtime and socket file path, respectively.
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-dns-spoof.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-dns-spoof.md
index 92d46be509a..d21e8b9ad24 100644
--- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-dns-spoof.md
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-dns-spoof.md
@@ -11,7 +11,7 @@ Pod DNS spoof is a Kubernetes pod-level chaos fault that injects chaos into pods
 
 ![Pod DNS Spoof](./static/images/dns-chaos.png)
 
-[This](https://youtu.be/YA97cvgbpTI) video walks through the execution of the pod IO latency experiment.
+[This](https://youtu.be/YA97cvgbpTI) video provides a step-by-step walkthrough of the execution process for the Pod DNS Spoof experiment.
 
 ## Use cases
 
@@ -88,6 +88,11 @@ permissions:
     <td> Map of the target host names. For example, '{"abc.com":"spoofabc.com"}' where the key is the host name to be spoofed and the value is the host name to which the key is spoofed or redirected. If not provided, no host names or domains are spoofed. </td>
   </tr>
+  <tr>
+    <td> TRANSACTION_PERCENTAGE </td>
+    <td> Percentage of the DNS queries to be affected. </td>
+    <td> It supports values in the range (0,100]. It targets all queries if not provided. For more information, go to transaction percentage. </td>
+  </tr>
   <tr>
     <td> PODS_AFFECTED_PERC </td>
     <td> Percentage of total pods to target. Provide numeric values. </td>
@@ -154,6 +159,40 @@ spec:
             value: "60"
 ```
 
+### Transaction Percentage
+
+The percentage of DNS queries to be affected, with supported values in the range (0, 100]. If not specified, the fault targets all queries. Tune it by using the `TRANSACTION_PERCENTAGE` environment variable.
+
+The following YAML snippet illustrates the use of this environment variable:
+
+[embedmd]:# (./static/manifests/pod-dns-spoof/transaction-percentage.yaml yaml)
+```yaml
+# contains transaction percentage for the dns spoofing
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+  name: engine-nginx
+spec:
+  engineState: "active"
+  annotationCheck: "false"
+  appinfo:
+    appns: "default"
+    applabel: "app=nginx"
+    appkind: "deployment"
+  chaosServiceAccount: litmus-admin
+  experiments:
+    - name: pod-dns-spoof
+      spec:
+        components:
+          env:
+            # provide the transaction percentage within (0,100] range
+            # for example, it will affect 50% of the total dns queries
+            - name: TRANSACTION_PERCENTAGE
+              value: '50'
+            - name: TOTAL_CHAOS_DURATION
+              value: '60'
+```
+
 ### Container runtime and socket path
 
 Use the `CONTAINER_RUNTIME` and `SOCKET_PATH` environment variables to set the container runtime and socket file path, respectively.
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-attribute-override.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-attribute-override.md
index b9b383b6ae3..4aeccd74c06 100644
--- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-attribute-override.md
+++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-attribute-override.md
@@ -14,6 +14,9 @@ This fault should be used as a sanity test for validating your application's fai
 
 ![Pod IO Attribute Override](./static/images/pod-io-attribute-override.png)
 
+[This](https://youtu.be/chk5K754J_4?si=df_2bEG6yXKtjGEl) video provides a step-by-step walkthrough of the execution process for the Pod IO Attribute Override experiment.
+ + ## Use cases Pod IO attribute override: - Tests whether your application is able to detect and respond to unexpected changes in file attributes, which can help you ensure that your system remains compliant and secure. diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-error.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-error.md index bc45af74fa7..0508fd474e3 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-error.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-error.md @@ -14,7 +14,7 @@ When triggered, it causes the call to fail and return an error, potentially disr ![Pod IO Error](./static/images/pod-io-error.png) -For a video tutorial, go to [pod IO error](https://youtu.be/_EyKwv5UupU) that walks through the execution of the experiment. +[This](https://youtu.be/_EyKwv5UupU) video provides a step-by-step walkthrough of the execution process for the Pod IO Error experiment. ## Use cases Pod IO error: diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-latency.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-latency.md index 8e80f539212..a58aeaeabaf 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-latency.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-latency.md @@ -14,9 +14,7 @@ This can help identify performance bottlenecks, test the system's ability to han ![Pod IO Latency](./static/images/pod-io-latency.png) -For a video tutorial, go to [pod IO error](https://youtu.be/HI7sk7I13WU) that walks through the execution of the experiment. - -[This](https://youtu.be/HI7sk7I13WU) video walks through the execution of the pod IO latency experiment. +[This](https://youtu.be/HI7sk7I13WU) video provides a step-by-step walkthrough of the execution process for the Pod IO Latency experiment. ## Use cases Pod IO latency: diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-jvm-sql-exception.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-jvm-sql-exception.md new file mode 100644 index 00000000000..a8f070bd78a --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-jvm-sql-exception.md @@ -0,0 +1,167 @@ +--- +id: pod-jvm-sql-exception +title: Pod JVM SQL Exception +--- + +Pod JVM SQL Exception fault simulates SQL query failures by raising exceptions for SQL queries executed by the Java process running inside a Kubernetes pod. This helps test the application's behavior and resilience against SQL-related errors. + +:::tip +JVM chaos faults use the [Byteman utility](https://byteman.jboss.org/) to inject chaos faults into the JVM. +::: + +![Pod JVM SQL Exception](./static/images/pod-jvm-sql-exception.png) + +### Use cases +Pod JVM SQL exception: +- Validate the application's resilience by simulating SQL exceptions to ensure it can recover gracefully, retry operations, or switch to backup databases without affecting functionality. +- Assess if the monitoring systems and alerting mechanisms can accurately detect and report SQL query exceptions in real-time. +- Trigger exception-handling paths in the application to ensure coverage of edge cases related to SQL query failures during testing. + + +### Mandatory tunables + + + + + + + + + + + + + + + + + + + + + + + + + + +
Tunable Description Notes
TABLE The name of the SQL table to be targeted. For more information, go to Parameters
SQL_DATA_ACCESS_FRAMEWORK The name of the data access framework. It supports MYSQL5, MYSQL8, and HIBERNATE types. For more information, go to Parameters
SQL_EXCEPTION_CLASS The name of the exception class. For more information, go to Parameters
SQL_EXCEPTION_MESSAGE The exception message to be raised. For more information, go to Parameters
+ +### Optional tunables + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Tunable Description Notes
TOTAL_CHAOS_DURATION Duration through which chaos is injected into the target resource. Should be provided in [numeric-hours]h[numeric-minutes]m[numeric-seconds]s format. Default: 30s. Examples: 1m25s, 1h3m2s, 1h3s. For more information, go to duration of the chaos.
SQL_OPERATION The type of SQL query to be targeted. If not provided, it targets all SQL queries. For example: select. For more information, go to Parameters
TRANSACTION_PERCENTAGE The percentage of total SQL queries to be targeted. Supports percentage in (0.00,1.00] range. If not provided, it targets all SQL queries. For more information, go to Parameters
POD_AFFECTED_PERCENTAGE Percentage of total pods to target. Provide numeric values. Default: 0 (corresponds to 1 replica). For more information, go to pods affected percentage.
JAVA_HOME Path to the Java installation directory. For example, /tmp/dir/jdk.
BYTEMAN_PORT Port used by the Byteman agent. Default: 9091.
CONTAINER_RUNTIME Container runtime interface for the cluster. Default: containerd. Supported values: docker, containerd, and crio. For more information, go to container runtime.
SOCKET_PATH Path of the containerd or crio or docker socket file. Default: /run/containerd/containerd.sock. For more information, go to socket path.
RAMP_TIME Period to wait before and after injecting chaos. Should be provided in [numeric-hours]h[numeric-minutes]m[numeric-seconds]s format. Default: 0s. Examples: 1m25s, 1h3m2s, 1h3s. For more information, go to ramp time.
SEQUENCE Sequence of chaos execution for multiple target pods. Default: parallel. Supports serial and parallel. For more information, go to sequence of chaos execution.
TARGET_CONTAINER Name of the target container where chaos is injected. None. For more information, go to target specific container.
TARGET_PODS Comma-separated list of application pod names subject to chaos. If not provided, the fault selects target pods randomly based on provided appLabels. For more information, go to target specific pods.
NODE_LABEL Filters the target pods that are scheduled on nodes matching the specified `NODE_LABEL`. For more information, go to node label.
LIB_IMAGE Image used to inject chaos. Default: harness/ddcr-faults:main-latest. For more information, go to image used by the helper pod.
+ +### Parameters + +The following YAML snippet illustrates the use of these tunables: + +[embedmd]:# (./static/manifests/pod-jvm-sql-exception/params.yaml yaml) +```yaml +kind: KubernetesChaosExperiment +apiVersion: litmuschaos.io/v1alpha1 +metadata: + name: pod-jvm-sql-exception + namespace: hce +spec: + tasks: + - definition: + chaos: + env: + - name: TOTAL_CHAOS_DURATION + value: "60" + # provide the sql table name + - name: TABLE + value: "product" + # provide the sql operation name + # it supports select, insert, update, delete, replace types + - name: SQL_OPERATION + value: "select" + # name of the data access framework + # it supports MYSQL5, MYSQL8, HIBERNATE types + - name: SQL_DATA_ACCESS_FRAMEWORK + value: "MYSQL8" + # name of the sql exception class + - name: SQL_EXCEPTION_CLASS + value: "com.mysql.cj.exceptions.CJException" + # provide the exception message + - name: SQL_EXCEPTION_MESSAGE + value: "CHAOS BOOM!" + # provide the transaction percentage + - name: TRANSACTION_PERCENTAGE + value: "50" +``` \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-jvm-sql-latency.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-jvm-sql-latency.md new file mode 100644 index 00000000000..759316b45c5 --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-jvm-sql-latency.md @@ -0,0 +1,157 @@ +--- +id: pod-jvm-sql-latency +title: Pod JVM SQL Latency +--- + +Pod JVM SQL Latency fault introduces latency in the SQL queries executed by the Java process running inside a Kubernetes pod. +:::tip +JVM chaos faults use the [Byteman utility](https://byteman.jboss.org/) to inject chaos faults into the JVM. +::: + +![Pod JVM SQL Latency](./static/images/pod-jvm-sql-latency.png) + +### Use cases +Pod JVM SQL latency: +- Simulate database latency to evaluate how the application handles slower SQL queries, assess system performance under delayed database responses, and identify potential bottlenecks in handling high volumes of requests. +- Test the impact of SQL query latency on the end-user experience, ensuring the application behaves gracefully under slower response times. This includes validating timeouts, retries, and fallback mechanisms to maintain a seamless user experience. +- Ensure that the application can handle delayed SQL queries without failing. Test timeout configurations, error-handling strategies, and automatic recovery processes to verify that the system can withstand latency-induced delays without causing critical failures. + +### Mandatory tunables + + + + + + + + + + + + + + + + + + + + + +
Tunable Description Notes
TABLE The name of the SQL table to be targeted. For more information, go to Parameters
SQL_DATA_ACCESS_FRAMEWORK The name of the data access framework. It supports MYSQL5, MYSQL8, and HIBERNATE types. For more information, go to Parameters
SQL_LATENCY The latency to be injected into the SQL queries (in ms). For more information, go to Parameters
+ +### Optional tunables + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Tunable Description Notes
TOTAL_CHAOS_DURATION Duration through which chaos is injected into the target resource. Should be provided in [numeric-hours]h[numeric-minutes]m[numeric-seconds]s format. Default: 30s. For example: 1m25s, 1h3m2s, 1h3s. For more information, go to duration of the chaos.
SQL_OPERATION The type of SQL query to be targeted. If not provided, it targets all SQL queries. For example: select. For more information, go to Parameters
TRANSACTION_PERCENTAGE The percentage of total SQL queries to be targeted. Supports percentage in (0.00,1.00] range. If not provided, it targets all SQL queries. For more information, go to Parameters
POD_AFFECTED_PERCENTAGE Percentage of total pods to target. Provide numeric values. Default: 0 (corresponds to 1 replica). For more information, go to pods affected percentage
JAVA_HOME Path to the Java installation directory. For example, /tmp/dir/jdk.
BYTEMAN_PORT Port used by the Byteman agent. Default: 9091.
CONTAINER_RUNTIME Container runtime interface for the cluster. Default: containerd. Supported values: docker, containerd, and crio. For more information, go to container runtime.
SOCKET_PATH Path of the containerd or crio or docker socket file. Default: /run/containerd/containerd.sock. For more information, go to socket path.
RAMP_TIME Period to wait before and after injecting chaos. Should be provided in [numeric-hours]h[numeric-minutes]m[numeric-seconds]s format. Default: 0s. For example: 1m25s, 1h3m2s, 1h3s. For more information, go to ramp time.
SEQUENCE Sequence of chaos execution for multiple target pods. Default: parallel. Supports serial and parallel. For more information, go to sequence of chaos execution.
TARGET_CONTAINER Name of the target container where chaos is injected. None. For more information, go to target specific container.
TARGET_PODS Comma-separated list of application pod names subject to chaos. If not provided, the fault selects target pods randomly based on provided appLabels. For more information, go to target specific pods.
NODE_LABEL Filters the target pods that are scheduled on nodes matching the specified `NODE_LABEL`. For more information, go to node label.
LIB_IMAGE Image used to inject chaos. Default: harness/ddcr-faults:main-latest. For more information, go to image used by the helper pod.
+ +### Parameters + +The following YAML snippet illustrates the use of these tunables: + +[embedmd]:# (./static/manifests/pod-jvm-sql-latency/params.yaml yaml) +```yaml +kind: KubernetesChaosExperiment +apiVersion: litmuschaos.io/v1alpha1 +metadata: + name: pod-jvm-sql-latency + namespace: hce +spec: + tasks: + - definition: + chaos: + env: + - name: TOTAL_CHAOS_DURATION + value: "60" + # provide the sql table name + - name: TABLE + value: "product" + # provide the sql operation name + # it supports select, insert, update, delete, replace types + - name: SQL_OPERATION + value: "select" + # name of the data access framework + # it supports MYSQL5, MYSQL8, HIBERNATE types + - name: SQL_DATA_ACCESS_FRAMEWORK + value: "MYSQL8" + # provide the latency in ms + - name: SQL_LATENCY + value: "2000" #in ms + # provide the transaction percentage + - name: TRANSACTION_PERCENTAGE + value: "50" +``` \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-corruption.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-corruption.md index 9f24147f120..4b71b953548 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-corruption.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-corruption.md @@ -81,6 +81,11 @@ permissions: Packet corruption in percentage. Default: 100. For more information, go to network packet corruption. + + CORRELATION + Degree of dependency between consecutive packets + It should be in range of (0,100]. For more information, go to correlation. + CONTAINER_RUNTIME Container runtime interface for the cluster. @@ -185,6 +190,37 @@ spec: value: '60' ``` +### Correlation + +Degree of dependency between consecutive packets. Tune it by using the `CORRELATION` environment variable. + +The following YAML snippet illustrates the use of this environment variable: + +[embedmd]:# (./static/manifests/pod-network-corruption/correlation.yaml yaml) +```yaml +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: engine-nginx +spec: + engineState: "active" + annotationCheck: "false" + appinfo: + appns: "default" + applabel: "app=nginx" + appkind: "deployment" + chaosServiceAccount: litmus-admin + experiments: + - name: pod-network-corruption + spec: + components: + env: + - name: CORRELATION + value: '100' #in percentage + - name: TOTAL_CHAOS_DURATION + value: '60' +``` + ### Destination IPs and destination hosts Default IPs and hosts whose traffic is interrupted due to the network faults. Tune it by using the `DESTINATION_IPS` and `DESTINATION_HOSTS` environment variables, respectively. diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-duplication.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-duplication.md index 8f61fb5ef6e..0a3952cdcc9 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-duplication.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-duplication.md @@ -81,6 +81,11 @@ permissions: Packet duplication (in percentage). Default: 100 %. For more information, go to network packet duplication. + + CORRELATION + Degree of dependency between consecutive packets + It should be in range of (0,100]. For more information, go to correlation. + CONTAINER_RUNTIME Container runtime interface for the cluster. 
@@ -186,6 +191,37 @@ spec: value: "60" ``` +### Correlation + +Degree of dependency between consecutive packets. Tune it by using the `CORRELATION` environment variable. + +The following YAML snippet illustrates the use of this environment variable: + +[embedmd]:# (./static/manifests/pod-network-duplication/correlation.yaml yaml) +```yaml +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: engine-nginx +spec: + engineState: "active" + annotationCheck: "false" + appinfo: + appns: "default" + applabel: "app=nginx" + appkind: "deployment" + chaosServiceAccount: litmus-admin + experiments: + - name: pod-network-duplication + spec: + components: + env: + - name: CORRELATION + value: '100' #in percentage + - name: TOTAL_CHAOS_DURATION + value: '60' +``` + ### Destination IPs and destination hosts Default IPs and hosts whose traffic is interrupted due to the network faults. Tune it by using the `DESTINATION_IPS` and `DESTINATION_HOSTS` environment variables, respectively. diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-latency.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-latency.md index be777d56bf0..29bbe2fc628 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-latency.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-latency.md @@ -89,6 +89,11 @@ permissions: Delay (in milliseconds). Provide numeric values. Defaults to 2000. For more information, go to network latency. + + CORRELATION + Degree of dependency between consecutive packets + It should be in range of (0,100]. For more information, go to correlation. + JITTER Network jitter (in milliseconds). Provide numeric values. @@ -199,6 +204,37 @@ spec: value: "60" ``` +### Correlation + +Degree of dependency between consecutive packets. Tune it by using the `CORRELATION` environment variable. + +The following YAML snippet illustrates the use of this environment variable: + +[embedmd]:# (./static/manifests/pod-network-latency/correlation.yaml yaml) +```yaml +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: engine-nginx +spec: + engineState: "active" + annotationCheck: "false" + appinfo: + appns: "default" + applabel: "app=nginx" + appkind: "deployment" + chaosServiceAccount: litmus-admin + experiments: + - name: pod-network-latency + spec: + components: + env: + - name: CORRELATION + value: '100' #in percentage + - name: TOTAL_CHAOS_DURATION + value: '60' +``` + ### Destination IPs and destination hosts Default IPs and hosts whose traffic is interrupted due to the network faults. Tune it by using the `DESTINATION_IPS` and `DESTINATION_HOSTS` environment variabes, respectively. diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-loss.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-loss.md index 19a635bc83a..40fd4cf4086 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-loss.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-loss.md @@ -88,6 +88,11 @@ permissions: Packet loss (in percentage). Default: 100 %. For more information, go to network packet loss. + + CORRELATION + Degree of dependency between consecutive packets + It should be in range of (0,100]. For more information, go to correlation. + CONTAINER_RUNTIME Container runtime interface for the cluster. 
@@ -186,6 +191,38 @@ spec: - name: TOTAL_CHAOS_DURATION value: '60' ``` + +### Correlation + +Degree of dependency between consecutive packets. Tune it by using the `CORRELATION` environment variable. + +The following YAML snippet illustrates the use of this environment variable: + +[embedmd]:# (./static/manifests/pod-network-loss/correlation.yaml yaml) +```yaml +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: engine-nginx +spec: + engineState: "active" + annotationCheck: "false" + appinfo: + appns: "default" + applabel: "app=nginx" + appkind: "deployment" + chaosServiceAccount: litmus-admin + experiments: + - name: pod-network-loss + spec: + components: + env: + - name: CORRELATION + value: '100' #in percentage + - name: TOTAL_CHAOS_DURATION + value: '60' +``` + ### Destination IPs and destination hosts Default IPs and hosts whose traffic is interrupted because of the network faults. Tune it by using the `DESTINATION_IPS` and `DESTINATION_HOSTS` environment variables, respectively. diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-rate-limit.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-rate-limit.md index 7de52a5f47e..0083bbb7c07 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-rate-limit.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-network-rate-limit.md @@ -11,7 +11,10 @@ Pod network rate limit is a Kubernetes pod-level chaos fault that generates Traf ![Pod Network Rate Limit](./static/images/pod-network-rate-limit.png) +[This](https://youtu.be/01efVOyFGl8?si=414-AX6yVn2GqfON) video provides a step-by-step walkthrough of the execution process for the Pod Network Rate Limit experiment. + ## Use cases + Pod network rate limit: - Assess how well applications and services perform under constrained network bandwidth, helping identify potential bottlenecks and weaknesses. - Ensure that critical services receive the necessary network bandwidth allocation while non-essential services are appropriately limited to maintain overall system stability. 
diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/images/pod-jvm-sql-exception.png b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/images/pod-jvm-sql-exception.png new file mode 100644 index 00000000000..0402ced40d1 Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/images/pod-jvm-sql-exception.png differ diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/images/pod-jvm-sql-latency.png b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/images/pod-jvm-sql-latency.png new file mode 100644 index 00000000000..bbf5f8c7a36 Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/images/pod-jvm-sql-latency.png differ diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-dns-error/transaction-percentage.yaml b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-dns-error/transaction-percentage.yaml new file mode 100644 index 00000000000..6cd2040fe02 --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-dns-error/transaction-percentage.yaml @@ -0,0 +1,24 @@ +# contains transaction percentage for the dns error +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: engine-nginx +spec: + engineState: "active" + annotationCheck: "false" + appinfo: + appns: "default" + applabel: "app=nginx" + appkind: "deployment" + chaosServiceAccount: litmus-admin + experiments: + - name: pod-dns-error + spec: + components: + env: + # provide the transaction percentage within (0,100] range + # for example, it will affect 50% of the total dns queries + - name: TRANSACTION_PERCENTAGE + value: '50' + - name: TOTAL_CHAOS_DURATION + value: '60' \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-dns-spoof/transaction-percentage.yaml b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-dns-spoof/transaction-percentage.yaml new file mode 100644 index 00000000000..d717f984795 --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-dns-spoof/transaction-percentage.yaml @@ -0,0 +1,24 @@ +# contains transaction percentage for the dns spoofing +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: engine-nginx +spec: + engineState: "active" + annotationCheck: "false" + appinfo: + appns: "default" + applabel: "app=nginx" + appkind: "deployment" + chaosServiceAccount: litmus-admin + experiments: + - name: pod-dns-spoof + spec: + components: + env: + # provide the transaction percentage within (0,100] range + # for example, it will affect 50% of the total dns queries + - name: TRANSACTION_PERCENTAGE + value: '50' + - name: TOTAL_CHAOS_DURATION + value: '60' \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-jvm-sql-exception/params.yaml b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-jvm-sql-exception/params.yaml new file mode 100644 index 00000000000..0d42fa91068 --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-jvm-sql-exception/params.yaml @@ -0,0 +1,32 @@ +kind: KubernetesChaosExperiment +apiVersion: 
litmuschaos.io/v1alpha1 +metadata: + name: pod-jvm-sql-exception + namespace: hce +spec: + tasks: + - definition: + chaos: + env: + - name: TOTAL_CHAOS_DURATION + value: "60" + # provide the sql table name + - name: TABLE + value: "product" + # provide the sql operation name + # it supports select, insert, update, delete, replace types + - name: SQL_OPERATION + value: "select" + # name of the data access framework + # it supports MYSQL5, MYSQL8, HIBERNATE types + - name: SQL_DATA_ACCESS_FRAMEWORK + value: "MYSQL8" + # name of the sql exception class + - name: SQL_EXCEPTION_CLASS + value: "com.mysql.cj.exceptions.CJException" + # provide the exception message + - name: SQL_EXCEPTION_MESSAGE + value: "CHAOS BOOM!" + # provide the transaction percentage + - name: TRANSACTION_PERCENTAGE + value: "50" \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-jvm-sql-latency/params.yaml b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-jvm-sql-latency/params.yaml new file mode 100644 index 00000000000..fb1b7336b42 --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-jvm-sql-latency/params.yaml @@ -0,0 +1,29 @@ +kind: KubernetesChaosExperiment +apiVersion: litmuschaos.io/v1alpha1 +metadata: + name: pod-jvm-sql-latency + namespace: hce +spec: + tasks: + - definition: + chaos: + env: + - name: TOTAL_CHAOS_DURATION + value: "60" + # provide the sql table name + - name: TABLE + value: "product" + # provide the sql operation name + # it supports select, insert, update, delete, replace types + - name: SQL_OPERATION + value: "select" + # name of the data access framework + # it supports MYSQL5, MYSQL8, HIBERNATE types + - name: SQL_DATA_ACCESS_FRAMEWORK + value: "MYSQL8" + # provide the latency in ms + - name: SQL_LATENCY + value: "2000" #in ms + # provide the transaction percentage + - name: TRANSACTION_PERCENTAGE + value: "50" \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-corruption/correlation.yaml b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-corruption/correlation.yaml new file mode 100644 index 00000000000..350d0587329 --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-corruption/correlation.yaml @@ -0,0 +1,21 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: engine-nginx +spec: + engineState: "active" + annotationCheck: "false" + appinfo: + appns: "default" + applabel: "app=nginx" + appkind: "deployment" + chaosServiceAccount: litmus-admin + experiments: + - name: pod-network-corruption + spec: + components: + env: + - name: CORRELATION + value: '100' #in percentage + - name: TOTAL_CHAOS_DURATION + value: '60' \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-duplication/correlation.yaml b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-duplication/correlation.yaml new file mode 100644 index 00000000000..12ec8f6fcda --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-duplication/correlation.yaml @@ -0,0 +1,21 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: engine-nginx +spec: + 
engineState: "active" + annotationCheck: "false" + appinfo: + appns: "default" + applabel: "app=nginx" + appkind: "deployment" + chaosServiceAccount: litmus-admin + experiments: + - name: pod-network-duplication + spec: + components: + env: + - name: CORRELATION + value: '100' #in percentage + - name: TOTAL_CHAOS_DURATION + value: '60' \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-latency/correlation.yaml b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-latency/correlation.yaml new file mode 100644 index 00000000000..2e55efa303a --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-latency/correlation.yaml @@ -0,0 +1,21 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: engine-nginx +spec: + engineState: "active" + annotationCheck: "false" + appinfo: + appns: "default" + applabel: "app=nginx" + appkind: "deployment" + chaosServiceAccount: litmus-admin + experiments: + - name: pod-network-latency + spec: + components: + env: + - name: CORRELATION + value: '100' #in percentage + - name: TOTAL_CHAOS_DURATION + value: '60' \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-loss/correlation.yaml b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-loss/correlation.yaml new file mode 100644 index 00000000000..1de19d0ed47 --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/static/manifests/pod-network-loss/correlation.yaml @@ -0,0 +1,21 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: engine-nginx +spec: + engineState: "active" + annotationCheck: "false" + appinfo: + appns: "default" + applabel: "app=nginx" + appkind: "deployment" + chaosServiceAccount: litmus-admin + experiments: + - name: pod-network-loss + spec: + components: + env: + - name: CORRELATION + value: '100' #in percentage + - name: TOTAL_CHAOS_DURATION + value: '60' \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/time-chaos.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/time-chaos.md index 9b94cb9a8cb..69b48223fdb 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/time-chaos.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/time-chaos.md @@ -11,9 +11,7 @@ Time chaos is a Kubernetes pod-level fault that introduces controlled time offse ![Time Chaos](./static/images/time-chaos.png) - -For a video tutorial, go to [time chaos](https://youtu.be/9S_wnY1rLfs) that walks through the execution of the experiment. - +[This](https://youtu.be/9S_wnY1rLfs) video provides a step-by-step walkthrough of the execution process for the Time Chaos experiment. ## Use cases Time Chaos: diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/tkgi/permissions.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/tkgi/permissions.md index dbad1365833..5459589d20a 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/tkgi/permissions.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/tkgi/permissions.md @@ -34,9 +34,9 @@ This topic describes the HCE platform requires to execute chaos experiments. 
Access requirements for advanced chaos experiments -
  • Chaos ServiceAccount: [consolidated serviceaccount for advanced pod and node chaos]
  • -
  • Container Runtime privileges: [recommended psp for advanced chaos]
  • -
  • Refer to Chaos Experiment Flow for Microservice Targets section in the architecture doc for more details on need for the privileges
+ Chaos deployment and architecture details diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-cpu-stress.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-cpu-stress.md index 98d4c7a89f8..86aebf4d6df 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-cpu-stress.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-cpu-stress.md @@ -37,7 +37,7 @@ This fault uses [`stress-ng`](https://github.com/ColinIanKing/stress-ng), which load Percentage load to be exerted on a single CPU core. - Default: 100 %. + Default: 100 %. 0 refers to no load and 100 refers to full load. workers @@ -81,6 +81,11 @@ spec: The `load` input variable exerts the CPU load (in percentage) per core. +:::tip +- The load generated by the stress process may vary based on factors like overall processor load and the responsiveness of the system scheduler. +- It is recommended that the number of CPU cores provided as input should be equal to or greater than the total number of logical CPU cores. +::: + The following YAML snippet illustrates the use of this input variable: [embedmd]:# (./static/manifests/linux-cpu-stress/load.yaml yaml) diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-disk-io-stress.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-disk-io-stress.md index 85681c3958a..08c6fe13c4d 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-disk-io-stress.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-disk-io-stress.md @@ -12,6 +12,12 @@ import FaultPermissions from './shared/fault-permissions.md' Linux disk IO stress applies stress on the disk of the target Linux machines over I/O operations for a specific duration. +It aims to consume the I/O bandwidth by performing frequent writes and reads to and from the disk, respectively. Consequently, the size of the file created for this operation changes frequently. + +:::tip +To understand the impact of the fault, check the available I/O bandwidth before and during chaos. +::: + ![Linux disk IO stress](./static/images/linux-disk-io-stress.png) ## Use cases diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-memory-stress.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-memory-stress.md index 7fff1cf46e0..89e45c2306d 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-memory-stress.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/linux-memory-stress.md @@ -85,6 +85,11 @@ spec: The `memory` input variable specifies the amount of memory to be filled. +:::tip +- The `stress-ng` package attempts to utilize a percentage of the available free memory using each stressor process. +- It is recommended that the number of workers should be equal to or greater than the total number of logical CPU cores. 
+::: + The following YAML snippet illustrates the use of this input variable: [embedmd]:# (./static/manifests/linux-memory-stress/memory.yaml yaml) diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/experiments.ts b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/experiments.ts index 099f6628f7f..cbde46fdf79 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/experiments.ts +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/experiments.ts @@ -8,6 +8,13 @@ export const experiments: ExperimentDetails[] = [ tags: ["cpu"], category: "windows", }, + { + name: "Windows Disk stress", + description: + "Windows disk stress injects disk stress into a Windows OS based VM, by consuming and exhausting the disk resources on the target Windows machine.", + tags: ["disk"], + category: "windows", + }, { name: "Windows Memory stress", description: @@ -50,4 +57,11 @@ export const experiments: ExperimentDetails[] = [ tags: ["network", "loss"], category: "windows", }, + { + name: "Windows Process Kill", + description: + "Windows process kill kills the target processes that are running as a part of a Windows OS based VM. The services that are disrupted might be running in the VM, and this fault kills their underlying processes or threads.", + tags: ["process", "kill"], + category: "windows", + }, ]; diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/images/windows-disk-stress.png b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/images/windows-disk-stress.png new file mode 100644 index 00000000000..084c98f7440 Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/images/windows-disk-stress.png differ diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/images/windows-process-kill.png b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/images/windows-process-kill.png new file mode 100644 index 00000000000..a2ad843007a Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/images/windows-process-kill.png differ diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/manifests/windows-disk-stress/params.yaml b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/manifests/windows-disk-stress/params.yaml new file mode 100644 index 00000000000..255985e302c --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/manifests/windows-disk-stress/params.yaml @@ -0,0 +1,23 @@ +apiVersion: litmuchaos.io/v1alpha1 +kind: MachineFault +metadata: + name: windows-disk-stress + labels: + name: disk-stress +spec: + infraType: windows + env: + - name: MEMORY_CONSUMPTION + value: "1024" + - name: BLOCK_SIZE_IN_KILOBYTES + value: "128" + - name: WRITE_PERCENTAGE + value: "100" + - name: NUMBER_OF_WORKERS + value: "2" + - name: DISK_PATH + value: "C:\\" + - name: DURATION + value: "30s" + - name: RAMP_TIME # optional tunable + value: "" \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/manifests/windows-process-kill/params.yaml b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/manifests/windows-process-kill/params.yaml new file mode 100644 index 00000000000..803c0db53c8 --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/static/manifests/windows-process-kill/params.yaml @@ -0,0 +1,19 @@ +apiVersion: litmuchaos.io/v1alpha1 +kind: 
MachineFault +metadata: + name: windows-process-kill + labels: + name: process-kill +spec: + infraType: windows + env: + - name: PROCESS_IDS + value: "" + - name: PROCESS_NAMES + value: "" + - name: FORCE + value: "disable" + - name: DURATION + value: "30s" + - name: RAMP_TIME + value: "" \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/windows-disk-stress.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/windows-disk-stress.md new file mode 100644 index 00000000000..cb170619b8d --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/windows-disk-stress.md @@ -0,0 +1,104 @@ +--- +id: windows-disk-stress +title: Windows Disk stress +--- + +Windows disk stress injects disk stress into a Windows OS based VM, by consuming and exhausting the disk resources on the target Windows machine. + +![Windows disk stress](./static/images/windows-disk-stress.png) + +## Use cases +Windows disk stress: +- Simulates a lack of disk for processes running on the application, causing performance degradation and system slowdowns. +- Simulates slow application traffic or resource exhaustion, leading to degradation in the performance of processes on the machine. + +### Prerequisites +- Ensure that the [prerequisites](/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/prerequisites) are fulfilled before executing the experiment. + +### External packages +This fault uses [Diskspd](https://learn.microsoft.com/en-us/azure/azure-local/manage/diskspd-overview), which is installed as part of the infrastructure installation. + +### Mandatory tunables + + + + + + + + + + + + +
Tunable Description Notes
MEMORY_CONSUMPTION Amount of stress applied to the target Windows VM (in MB). Default: 1024.
+ +### Optional tunables + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Tunable Description Notes
BLOCK_SIZE_IN_KILOBYTES Data block size used to fill the disk (in KB). Default: 128.
WRITE_PERCENTAGE Percentage of total disk write. Default: 100.
NUMBER_OF_WORKERS Number of workers used to run the stress process. Default: 2.
DISK_PATH Path in the Windows VM to apply stress. Default: C:\\.
DURATION Duration for which chaos is injected into the target resource (in seconds). Default: 30s. For more information, go to duration of the chaos.
RAMP_TIME Period to wait before and after injecting chaos (in seconds). For example, 30s. For more information, go to ramp time.
+ +### Parameters + +Following YAML snippet illustrates the use of the input variables described earlier. + +[embedmd]:# (./static/manifests/windows-disk-stress/params.yaml yaml) +```yaml +apiVersion: litmuchaos.io/v1alpha1 +kind: MachineFault +metadata: + name: windows-disk-stress + labels: + name: disk-stress +spec: + infraType: windows + env: + - name: MEMORY_CONSUMPTION + value: "1024" + - name: BLOCK_SIZE_IN_KILOBYTES + value: "128" + - name: WRITE_PERCENTAGE + value: "100" + - name: NUMBER_OF_WORKERS + value: "2" + - name: DISK_PATH + value: "C:\\" + - name: DURATION + value: "30s" + - name: RAMP_TIME # optional tunable + value: "" +``` diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/windows-process-kill.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/windows-process-kill.md new file mode 100644 index 00000000000..d41a64df8f6 --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/windows-process-kill.md @@ -0,0 +1,88 @@ +--- +id: windows-process-kill +title: Windows Process Kill +--- + +Windows process kill kills the target processes that are running as a part of a Windows OS based VM. The services that are disrupted might be running in the VM, and this fault kills their underlying processes or threads. + +![Windows process kill](./static/images/windows-process-kill.png) + +## Use cases +Windows process kill: +- Determines the resilience of an application (or process) running on the Windows VMs. +- Induces process kill using the process IDs and process name on the target Windows machines. +- Simulates loss of processes, leading to degradation in the performance of application on the machine. + +## Prerequisites +- Ensure that the [prerequisites](/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/prerequisites) are fulfilled before executing the experiment. + +### Mandatory tunables + + + + + + + + + + + + + + + + + +
Tunable Description Notes
FORCE If set to "enable", the process is forcefully killed. Default: disable.
DURATION Duration for which chaos is injected into the target resource (in seconds). Default: 30s. For more information, go to duration of the chaos.
+ +### Optional tunables + + + + + + + + + + + + + + + + + + + + + +
Tunable Description Notes
PROCESS_IDS Process IDs of the target processes provided as comma-separated values. For example, 18375,25323,85657.
PROCESS_NAMES Comma-separated list of process names to kill. For example, chrome.exe,firefox.exe.
RAMP_TIME Period to wait before and after injecting chaos (in seconds). For example, 30s. For more information, go to ramp time.
+ +### Parameters + +Following YAML snippet illustrates the use of these input variables. + +[embedmd]:# (./static/manifests/windows-process-kill/params.yaml yaml) +```yaml +apiVersion: litmuchaos.io/v1alpha1 +kind: MachineFault +metadata: + name: windows-process-kill + labels: + name: process-kill +spec: + infraType: windows + env: + - name: PROCESS_IDS + value: "" + - name: PROCESS_NAMES + value: "" + - name: FORCE + value: "disable" + - name: DURATION + value: "30s" + - name: RAMP_TIME + value: "" +``` \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/windows.md b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/windows.md index d4cdedd31c2..e9bb27f6696 100644 --- a/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/windows.md +++ b/docs/chaos-engineering/use-harness-ce/chaos-faults/windows/windows.md @@ -37,6 +37,20 @@ This fault helps determine how resilient an application is when stress is applie +### Windows Disk stress + +Windows disk stress injects disk stress into a Windows OS based VM, by consuming and exhausting the disk resources on the target Windows machine. + + +Use cases +- Simulates a lack of disk for processes running on the application, causing performance degradation and system slowdowns. +- Simulates slow application traffic or resource exhaustion, leading to degradation in the performance of processes on the machine. + + + + + + ### Windows memory stress @@ -127,4 +141,19 @@ Windows network loss causes network packet loss on Windows VM for the target hos + + +### Windows Process Kill + +Windows process kill kills the target processes that are running as a part of a Windows OS based VM. The services that are disrupted might be running in the VM, and this fault kills their underlying processes or threads. + + +Use cases +- It helps determine the resilience of an application (or process) running on the Windows VMs. +- Induces process kill using the process IDs and process name on the target Windows machines. +- Simulates loss of processes, leading to degradation in the performance of application on the machine. + + + + \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/chaoshubs/chaoshubs.md b/docs/chaos-engineering/use-harness-ce/chaoshubs/chaoshubs.md index 4f559155d25..dca904693d3 100644 --- a/docs/chaos-engineering/use-harness-ce/chaoshubs/chaoshubs.md +++ b/docs/chaos-engineering/use-harness-ce/chaoshubs/chaoshubs.md @@ -16,7 +16,7 @@ You can add ChaosHub using a Git service provider such as GitHub, where ChaosHub HCE provides a default Enterprise ChaosHub that includes a wide array of experiments and faults out of the box. You can also add your own custom ChaosHubs to maintain and share private scenarios and faults within your organization. -For a video tutorial, go to [HCE- ChaosHub](https://youtu.be/b4ggnJcZrcM). +[This](https://youtu.be/b4ggnJcZrcM) video provides a step-by-step walkthrough of using a ChaosHub. ### Permissions required diff --git a/docs/chaos-engineering/use-harness-ce/gameday.md b/docs/chaos-engineering/use-harness-ce/gameday.md index 94f8cb40334..9ac5eb13a68 100644 --- a/docs/chaos-engineering/use-harness-ce/gameday.md +++ b/docs/chaos-engineering/use-harness-ce/gameday.md @@ -31,7 +31,7 @@ A GameDay typically involves the following steps: 2. Observe the impact of the failure, 3. Discuss the technical outcomes. -To watch a video on GameDay, go to [GameDay](https://youtu.be/X-4Ripb4e2c) that describes how you can schedule a GameDay execution. 
+[This](https://youtu.be/X-4Ripb4e2c) video provides a step-by-step walkthrough of GameDay execution.

:::tip
GameDays help decide the type of failure the system would undergo based on the nature of the chaos experiments present within GameDay. Hence, HCE strongly recommends you begin with easy use cases with minimal blast radius, such as breaking one container, degrading one instance, and making one availability zone unavailable. Later, you can delve into more complex failures, such as failing an entire service or affecting a large percentage of requests.
diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/govern-run.md b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/govern-run.md
index 260c77ee1b0..26bf1466064 100644
--- a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/govern-run.md
+++ b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/govern-run.md
@@ -6,7 +6,7 @@ description: This topic describes how you can configure rules and conditions for

This topic describes how you can configure **ChaosGuard** to enforce security policies during chaos experiments execution. An additional layer of security that is executed before running a chaos experiment is [ChaosGuard](/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/).

-To watch a video on configuring the rules and conditions in ChaosGuard, go to [ChaosGuard- rules and conditions](https://youtu.be/cHFiBvQPCLU).
+[This](https://youtu.be/cHFiBvQPCLU) video provides a walkthrough of configuring the rules and conditions in ChaosGuard.

## Prerequisites
diff --git a/docs/chaos-engineering/use-harness-ce/image-registry.md b/docs/chaos-engineering/use-harness-ce/image-registry.md
index e07f2c27371..56d07f5cda2 100644
--- a/docs/chaos-engineering/use-harness-ce/image-registry.md
+++ b/docs/chaos-engineering/use-harness-ce/image-registry.md
@@ -25,7 +25,8 @@ A custom image registry allows for storing container images securely, restrictin

Follow the steps below to use [custom values](#custom-values-for-image-registry) or [default values](#default-values-for-image-registry) of the image registry in your chaos experiment.

-To watch a video on configuring image registry, go to [image registry](https://youtu.be/jpSd1nGf8s0).
+
+[This](https://youtu.be/jpSd1nGf8s0) video provides a step-by-step walkthrough of configuring the Image Registry.

### Why use a Custom Image Registry?
diff --git a/docs/chaos-engineering/use-harness-ce/infrastructures/upgrade-infra.md b/docs/chaos-engineering/use-harness-ce/infrastructures/upgrade-infra.md
index 18862e8291a..c03d346882f 100644
--- a/docs/chaos-engineering/use-harness-ce/infrastructures/upgrade-infra.md
+++ b/docs/chaos-engineering/use-harness-ce/infrastructures/upgrade-infra.md
@@ -11,9 +11,7 @@ If a HCE release is not backward compatible, upgrade your chaos infrastructure t

:::warning
- If you don't upgrade your infrastructure for these types of releases, chaos experiments may fail.
-- Checkout this [video tutorial to upgrade your chaos infrastructure to 1.38.x or higher](https://youtu.be/fAnsGqkcdkc).
+- [This video tutorial](https://youtu.be/fAnsGqkcdkc) provides a step-by-step walkthrough to upgrade your chaos infrastructure to 1.38.x or higher.
:::

These are the high-level steps to upgrade your chaos infrastructure, explained in detail later:
diff --git a/docs/chaos-engineering/use-harness-ce/probes/probes.md b/docs/chaos-engineering/use-harness-ce/probes/probes.md
index b36b7b39cd3..cf0b4fffd97 100644
--- a/docs/chaos-engineering/use-harness-ce/probes/probes.md
+++ b/docs/chaos-engineering/use-harness-ce/probes/probes.md
@@ -35,7 +35,7 @@ Depending on the type of probe, probes can:
- Validate your error budget (SLO probe),
- Connect with the APM tool and assert metrics (Datadog probe).

-For a video tutorial, go to [HCE- Resilience Probes](https://youtu.be/b4ggnJcZrcM).
+[This](https://youtu.be/b4ggnJcZrcM) video provides a step-by-step walkthrough of creating a Resilience Probe.

### Declarative hypothesis
diff --git a/docs/cloud-cost-management/3-use-ccm-cost-reporting/6-use-ccm-dashboards/download-dashboard-data-api.md b/docs/cloud-cost-management/3-use-ccm-cost-reporting/6-use-ccm-dashboards/download-dashboard-data-api.md
new file mode 100644
index 00000000000..fb0f4c9d31d
--- /dev/null
+++ b/docs/cloud-cost-management/3-use-ccm-cost-reporting/6-use-ccm-dashboards/download-dashboard-data-api.md
@@ -0,0 +1,66 @@
+---
+title: Download Dashboard Data via API
+description: Use the Harness API to download an export of your dashboard data
+---
+
+Once you have a Harness dashboard created with one or more tiles, you can grab an export of the data using the Harness API.
+
+# Authentication
+
+You will need an [API Key](https://developer.harness.io/docs/platform/automation/api/add-and-manage-api-keys/) to retrieve your exports.
+
+To set your key on requests, add the header `x-api-key: <your-api-key>` to the request.
+
+# Dashboard Parameters
+
+You will need the numerical dashboard ID and filter values for the API request.
+
+## Filters
+
+If your dashboard has filters at the dashboard level and you need to download the data when using specific values, you can grab these filter values from the UI.
+
+Navigate to your dashboard, set the filters accordingly, and refresh the dashboard.
+
+![](./static/download-dashboard-data-api-01.png)
+
+Then in the URL bar of your browser, copy the `filters` query parameter.
+
+For example, if your URL is the following:
+
+`https://app.harness.io/ng/account/wlgELJ0TTre5aZhzpt8gVA/dashboards/folder/shared/view/607?filters=%22Time%2BRange%3D90%22`
+
+You would copy `filters=%22Time%2BRange%3D90%22`.
+
+## Dashboard ID
+
+When viewing your dashboard, you can extract the dashboard ID from the URL after the `view/` path parameter.
+
+For example, if your URL is the following:
+
+`https://app.harness.io/ng/account/wlgELJ0TTre5aZhzpt8gVA/dashboards/folder/shared/view/607?filters=%22Time%2BRange%3D90%22`
+
+You would copy `607`.
+
+# Usage
+
+Details on this API call are documented in our [API spec](https://apidocs.harness.io/tag/dashboards#operation/get_dashboard_download_csv).
+
+You will need the following parameter values:
+
+- Harness Account ID
+- Harness API Key
+- Dashboard ID
+- Filters Value
+
+## Example
+
+```
+curl -i -X GET \
+  -H 'x-api-key: <your-api-key>' \
+  'https://app.harness.io/dashboard/download/dashboards/<dashboard-id>/csv?accountId=<account-id>&filters=<filters>&expanded_tables=true' \
+  -o dashboard_output.zip
+```
+
+You will want to include `expanded_tables` when your data is over 5000 rows, which is the default limit for Looker tables.
+
+The result will be a zip file called `dashboard_output.zip` which you can unzip and retrieve a CSV of each tile in your dashboard.
\ No newline at end of file diff --git a/docs/cloud-cost-management/3-use-ccm-cost-reporting/6-use-ccm-dashboards/static/download-dashboard-data-api-01.png b/docs/cloud-cost-management/3-use-ccm-cost-reporting/6-use-ccm-dashboards/static/download-dashboard-data-api-01.png new file mode 100644 index 00000000000..c69333192f5 Binary files /dev/null and b/docs/cloud-cost-management/3-use-ccm-cost-reporting/6-use-ccm-dashboards/static/download-dashboard-data-api-01.png differ diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/1-optimize-cloud-costs-with-intelligent-cloud-auto-stopping-rules/4-create-auto-stopping-rules/autostopping-dashboard.md b/docs/cloud-cost-management/4-use-ccm-cost-optimization/1-optimize-cloud-costs-with-intelligent-cloud-auto-stopping-rules/4-create-auto-stopping-rules/autostopping-dashboard.md index 191d347aa90..b205c9414f7 100644 --- a/docs/cloud-cost-management/4-use-ccm-cost-optimization/1-optimize-cloud-costs-with-intelligent-cloud-auto-stopping-rules/4-create-auto-stopping-rules/autostopping-dashboard.md +++ b/docs/cloud-cost-management/4-use-ccm-cost-optimization/1-optimize-cloud-costs-with-intelligent-cloud-auto-stopping-rules/4-create-auto-stopping-rules/autostopping-dashboard.md @@ -168,3 +168,13 @@ You can delete an AutoStopping rule from the Summary of Rules Page or from the d ![](./static/autostopping-dashboard-49.png) + +### Overlapping Schedules + + +Harness AutoStopping Rules now support overlapping schedules, offering enhanced flexibility for resource management. Users can define multiple fixed schedules within a single AutoStopping rule, even if they overlap. The resulting schedule is determined based on a customizable priority order, which can be adjusted using a drag-and-drop interface. + +Overlapping schedules are particularly useful for organizations with teams operating in different time zones or for scenarios where temporary overrides, such as maintenance windows, need to be added to an existing schedule. By prioritizing schedules, users can ensure that the most critical rules are applied at the right time without modifying or deleting existing configurations. 
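+To make the priority behavior concrete, the sketch below shows two overlapping fixed schedules. This is a purely illustrative, hypothetical sketch: the field names are invented for readability and are not the actual AutoStopping rule schema; in the product, schedules and their priority order are configured through the drag-and-drop interface described above.
+
+```yaml
+# Hypothetical sketch only. These field names are invented for illustration
+# and do not reflect the real AutoStopping configuration schema.
+# Two fixed schedules overlap on Wednesday evenings; the entry listed first
+# (highest priority) wins wherever the two conflict.
+schedules:
+  - name: apac-working-hours    # priority 1: keep resources up for the APAC team
+    type: uptime
+    days: [Mon, Tue, Wed, Thu, Fri]
+    window: "09:00-18:00 Asia/Kolkata"
+  - name: maintenance-override  # priority 2: applies only where no higher entry matches
+    type: downtime
+    days: [Wed]
+    window: "14:00-20:00 UTC"
+```
+
+Because `apac-working-hours` sits higher in the priority order, the overlapping portion of the maintenance window does not stop the resource; dragging `maintenance-override` above it would invert that outcome.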
+
+
+
diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/1-optimize-cloud-costs-with-intelligent-cloud-auto-stopping-rules/4-create-auto-stopping-rules/static/overlapping_schedules1.png b/docs/cloud-cost-management/4-use-ccm-cost-optimization/1-optimize-cloud-costs-with-intelligent-cloud-auto-stopping-rules/4-create-auto-stopping-rules/static/overlapping_schedules1.png
new file mode 100644
index 00000000000..f57adcc648f
Binary files /dev/null and b/docs/cloud-cost-management/4-use-ccm-cost-optimization/1-optimize-cloud-costs-with-intelligent-cloud-auto-stopping-rules/4-create-auto-stopping-rules/static/overlapping_schedules1.png differ
diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/1-optimize-cloud-costs-with-intelligent-cloud-auto-stopping-rules/4-create-auto-stopping-rules/static/overlapping_schedules2.png b/docs/cloud-cost-management/4-use-ccm-cost-optimization/1-optimize-cloud-costs-with-intelligent-cloud-auto-stopping-rules/4-create-auto-stopping-rules/static/overlapping_schedules2.png
new file mode 100644
index 00000000000..8c6263b845f
Binary files /dev/null and b/docs/cloud-cost-management/4-use-ccm-cost-optimization/1-optimize-cloud-costs-with-intelligent-cloud-auto-stopping-rules/4-create-auto-stopping-rules/static/overlapping_schedules2.png differ
diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/kubernetes-autostopping-traefik.md b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/kubernetes-autostopping-traefik.md
index 5790b0cdae9..c8b39fcaa1a 100644
--- a/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/kubernetes-autostopping-traefik.md
+++ b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/kubernetes-autostopping-traefik.md
@@ -61,7 +61,7 @@ After applying the YAML, an AutoStopping Rule is created in your cluster for ser

This header sends the AutoStoppingRule header to all the associated ingress routes.

```yaml
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: test-rule-header
@@ -71,6 +71,8 @@ spec:
    AutoStoppingRule: default-test-rule
```

+The `AutoStoppingRule` header should be set to a value following the pattern `<namespace>-<rule-name>`, which can be found in the metadata of the `AutoStoppingRule` you created above.
+
## Change IngressRoute

Once the Traefik ingressRoute is supported as a first class entity for AutoStopping, these changes will be automated.
@@ -109,7 +111,7 @@ middlewares:

Your ingressRoute should be similar to the following:

```yaml
-apiVersion: traefik.containo.us/v1alpha1
+apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  annotations:
diff --git a/docs/cloud-cost-management/5-use-ccm-cost-governance/asset-governance/AWS/AWS-recommendations.md b/docs/cloud-cost-management/5-use-ccm-cost-governance/asset-governance/AWS/AWS-recommendations.md
index af56c1034ca..4d23d4d553e 100644
--- a/docs/cloud-cost-management/5-use-ccm-cost-governance/asset-governance/AWS/AWS-recommendations.md
+++ b/docs/cloud-cost-management/5-use-ccm-cost-governance/asset-governance/AWS/AWS-recommendations.md
@@ -487,3 +487,138 @@ policies:
- ```redshift:DescribeClusterSnapshots```

---
+
+### Recommendation: delete-empty-dynamodb-tables
+
+**Description:** Delete DynamoDB tables which are empty
+
+**Policy Used:**
+
+```yaml
+policies:
+  - name: delete-empty-dynamodb-tables
+    resource: dynamodb-table
+    description: |
+      Delete DynamoDB tables which are empty
+    filters:
+      - TableSizeBytes: 0
+    actions:
+      - delete
+```
+**Savings Computed:** The policy identifies a list of resources on which potential savings are calculated by summing up the cost of each resource for the last 30 days.
+
+---
+
+### Recommendation: delete-stale-log-group
+
+**Description:** Delete stale CloudWatch log groups
+
+**Policy Used:**
+
+```yaml
+policies:
+  - name: delete-stale-log-group
+    resource: log-group
+    description: |
+      Delete stale CloudWatch log groups
+    filters:
+      - type: last-write
+        days: 60
+    actions:
+      - delete
+```
+**Savings Computed:** The policy identifies a list of resources on which potential savings are calculated by summing up the cost of each resource for the last 30 days.
+
+---
+
+### Recommendation: delete-stale-rds-snapshots
+
+**Description:** Delete all stale (older than 28 days) RDS snapshots
+
+**Policy Used:**
+
+```yaml
+policies:
+  - name: delete-stale-rds-snapshots
+    resource: rds-snapshot
+    description: |
+      Delete all stale (older than 28 days) RDS snapshots
+    filters:
+      - type: age
+        days: 28
+        op: ge
+    actions:
+      - delete
+```
+**Savings Computed:** The policy identifies a list of resources on which potential savings are calculated by summing up the cost of each resource for the last 30 days.
+
+---
+
+### Recommendation: delete-unencrypted-firehose
+
+**Description:** Delete Firehose delivery streams which are not encrypted
+
+**Policy Used:**
+
+```yaml
+policies:
+  - name: delete-unencrypted-firehose
+    resource: firehose
+    description: |
+      Delete Firehose delivery streams which are not encrypted
+    filters:
+      - KmsMasterKeyId: absent
+    actions:
+      - type: delete
+```
+**Savings Computed:** The policy identifies a list of resources on which potential savings are calculated by summing up the cost of each resource for the last 30 days.
+
+---
+
+### Recommendation: delete-unencrypted-sqs
+
+**Description:** Delete SQS queues which are not encrypted
+
+**Policy Used:**
+
+```yaml
+policies:
+  - name: delete-unencrypted-sqs
+    resource: sqs
+    description: |
+      Delete SQS queues which are not encrypted
+    filters:
+      - KmsMasterKeyId: absent
+    actions:
+      - type: delete
+```
+**Savings Computed:** The policy identifies a list of resources on which potential savings are calculated by summing up the cost of each resource for the last 30 days.
+
+---
+
+### Recommendation: delete-unused-nat-gateways
+
+**Description:** Delete unused NAT Gateways based on no associated traffic in past 7 days.
+
+**Policy Used:**
+
+```yaml
+policies:
+  - name: delete-unused-nat-gateways
+    resource: nat-gateway
+    description: |
+      Delete unused NAT Gateways based on no associated traffic in the past 7 days.
+    filters:
+      - type: metrics
+        name: BytesOutToDestination
+        statistics: Sum
+        period: 86400
+        days: 7
+        value: 0
+        op: eq
+    actions:
+      - type: delete
+```
+**Savings Computed:** The policy identifies a list of resources on which potential savings are calculated by summing up the cost of each resource for the last 30 days.
+
+---
diff --git a/docs/cloud-development-environments/features-of-gitspaces/private-docker-images.md b/docs/cloud-development-environments/features-of-gitspaces/private-docker-images.md
index 8862f0cf315..c8bfcf54cf4 100644
--- a/docs/cloud-development-environments/features-of-gitspaces/private-docker-images.md
+++ b/docs/cloud-development-environments/features-of-gitspaces/private-docker-images.md
@@ -21,7 +21,7 @@ To pull your private Docker images, you need to connect Harness to your Artifact
 2. [**Docker Registry**](https://developer.harness.io/docs/platform/connectors/cloud-providers/ref-cloud-providers/docker-registry-connector-settings-reference): You can also configure **Docker Registry** as a connector to access private Docker images. Refer to this guide on [adding a Docker registry](https://developer.harness.io/docs/platform/connectors/artifact-repositories/connect-to-an-artifact-repo#add-a-docker-registry).

 :::info
-Please note that the Docker Connector is platform-agnostic and can be used to connect to any Docker container registry.
+Please note that the Docker Connector is platform-agnostic and can be used to connect to any Docker V2-compliant registry.
 :::

 3. [**Amazon ECR**](https://developer.harness.io/docs/platform/connectors/cloud-providers/ref-cloud-providers/aws-connector-settings-reference): You can integrate **Amazon ECR** with Harness by adding an Amazon ECR connector. You can easily configure the required roles and policies, create the connector, and configure your credentials by following the steps in [this guide](https://developer.harness.io/docs/platform/connectors/cloud-providers/add-aws-connector).
@@ -81,6 +81,8 @@ Here’s what you need to add to your ```devcontainer.json``` file:
 | [**Amazon ECR**](https://developer.harness.io/docs/platform/connectors/cloud-providers/add-aws-connector) | "Aws" |

 - The `identifier` field should include the connector identifier, adjusted based on its scope level.
+
+- We also provide **Nexus** support for private Docker images using the same Docker Registry connector.
+
 :::info
 Currently, you can use only one connector per Gitspace to pull images from a private Repository.
diff --git a/docs/cloud-development-environments/ide's/vs-code-browser.md b/docs/cloud-development-environments/ide's/vs-code-browser.md
index 52220bc6694..a8bd24e2f1d 100644
--- a/docs/cloud-development-environments/ide's/vs-code-browser.md
+++ b/docs/cloud-development-environments/ide's/vs-code-browser.md
@@ -13,7 +13,7 @@ Harness CDE is now available in public beta. To enable it on your account, conta
 :::

-Harness CDE provides seamless support to connect and access your Gitspaces remotely within your browser via VS Code. This
+Harness CDE provides seamless support to connect and access your Gitspaces remotely within your browser via VS Code.
This guide will take you through the steps required to access a Gitspace in your VS Code Browser:
@@ -22,7 +22,7 @@ Please ensure that while creating one, you choose “VS Code Browser” as the s

 ![](./static/vs-code-browser.png)

-2. After the Gitspace is created, click “Open VS Code Onliner” from the Harness UI.
+2. After the Gitspace is created, click “Open VS Code Online” from the Harness UI.

 ![](./static/vs-code-browser-2.png)
diff --git a/docs/cloud-development-environments/introduction/whats-supported.md b/docs/cloud-development-environments/introduction/whats-supported.md
index 1ac01a08c82..dfc6b00b620 100644
--- a/docs/cloud-development-environments/introduction/whats-supported.md
+++ b/docs/cloud-development-environments/introduction/whats-supported.md
@@ -47,7 +47,7 @@ To use VS Code Desktop, you need to [install and configure](/docs/cloud-developm
 To pull your private Docker images, you need to connect Harness to your Artifact Repository by adding a repository connector. Currently, we support the following connectors:

 1. [JFrog Artifactory](https://developer.harness.io/docs/platform/connectors/cloud-providers/ref-cloud-providers/artifactory-connector-settings-reference)
-2. [Docker Registry](https://developer.harness.io/docs/platform/connectors/cloud-providers/ref-cloud-providers/docker-registry-connector-settings-reference)
+2. [Docker Registry](https://developer.harness.io/docs/platform/connectors/cloud-providers/ref-cloud-providers/docker-registry-connector-settings-reference) (any Docker V2-compliant registry)
 3. [Amazon ECR](https://developer.harness.io/docs/platform/connectors/cloud-providers/add-aws-connector)

 ## Regions Available
diff --git a/docs/cloud-development-environments/overview.md b/docs/cloud-development-environments/overview.md
index 496cd6e7c2d..3895540c3d7 100644
--- a/docs/cloud-development-environments/overview.md
+++ b/docs/cloud-development-environments/overview.md
@@ -37,7 +37,7 @@ CDEs eliminate these challenges with characteristics like:

 - *Flexible*: Each Gitspace can be spun up with a custom hardware configuration, so you can get bigger machines for resource-intensive applications and smaller machines for trivial ones.

-## Use Cases of CDE
+## Use Cases

 - *Faster Onboarding*: Developers can start coding on day 1 with a single click! This is not just for new developers joining an organization, but also for those switching teams or projects internally.
 - *Higher developer productivity & satisfaction*: CDEs eliminate toil! Developers no longer spend time on frustrating and thankless tasks such as trying to reproduce environment specific issues or reconfiguring their local machines.
diff --git a/docs/continuous-delivery/cd-integrations.md b/docs/continuous-delivery/cd-integrations.md
index ab908d86660..83d83a3243b 100644
--- a/docs/continuous-delivery/cd-integrations.md
+++ b/docs/continuous-delivery/cd-integrations.md
@@ -22,6 +22,10 @@ To enable a feature flag in your Harness account, contact [Harness Support](mail
 | Flag | Description |
 | --- | --- |
+| CDS_ARTIFACT_DISABLE_VALIDATION | Enables users to bypass primary and sidecar artifact consumption checks in the service of a **Deploy** stage. For more information, go to Harness [Skip Artifact Consumption for the Stage](/docs/continuous-delivery/x-platform-cd-features/services/artifact-sources/#skip-artifact-consumption-for-the-stage). **This feature is in Limited GA** |
+| CDS_CROSS_SCOPED_ENV_GROUPS | Enables users to add environments created at Account and Organisation level to Environment Groups.
For more information, go to Harness [Cross Scope Environment Groups](https://developer.harness.io/docs/continuous-delivery/x-platform-cd-features/environments/create-environment-groups#cross-scope-environment-groups). **This feature is in Limited GA** |
+| CDS_K8S_DETAILED_POD_LOGS | Enables users to view detailed logs of the **Wait For Steady State** step in Kubernetes deployments. For more information, go to Harness [Detailed diagnostics for K8s Deployment](https://developer.harness.io/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/kubernetes-cd-quickstart#detailed-diagnostics-for-k8s-deployment). **This feature is in Limited GA** |
+| CDS_SERVICE_OVERRIDES_2_0_YAML_V2_SUPPORT | Ensures that overrides from environment configuration YAML are ignored when Override V2 is enabled. This feature is in Limited GA. |
 | CDS_GOOGLE_CLOUD_RUN | Allows users to deploy artifacts to Google Cloud Run. This feature is in Limited GA. |
 | CDS_AZURE_FUNCTION | Allows users to deploy Azure Functions through Harness. This feature is in Limited GA. |
 | CDS_SVC_ENV_DASHBOARD_FOR_ACCOUNT_AND_ORG_LEVEL | Allows users to do Post-Deployment Rollback for services at both the Account and Organisation levels. This feature is in Limited GA. |
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/aws/aws-sam-deployments.md b/docs/continuous-delivery/deploy-srv-diff-platforms/aws/aws-sam-deployments.md
index 91fbc598c22..2221f06bf86 100644
--- a/docs/continuous-delivery/deploy-srv-diff-platforms/aws/aws-sam-deployments.md
+++ b/docs/continuous-delivery/deploy-srv-diff-platforms/aws/aws-sam-deployments.md
@@ -103,6 +103,8 @@ Unlike old images, in new images a single image has the capabiliity of handling
 Now, let's understand the runtime image:-
 ![](./static/multiple-runtime-sam.png)

+For ECR users, you can access these images via the [ECR Image Repository for SAM Plugin](https://gallery.ecr.aws/harness/harness/aws-sam-plugin).
+
 :::important note
 These images can only be used in containerized step.
 :::
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/azure/azure-function-tutorial.md b/docs/continuous-delivery/deploy-srv-diff-platforms/azure/azure-function-tutorial.md
index ff25572a5aa..8370f6376ac 100644
--- a/docs/continuous-delivery/deploy-srv-diff-platforms/azure/azure-function-tutorial.md
+++ b/docs/continuous-delivery/deploy-srv-diff-platforms/azure/azure-function-tutorial.md
@@ -100,9 +100,17 @@ When you're done, infrastructure will look something like this:

 ![](static/azure-functions-6.png)

+:::info note
+In the deploy steps under Container Configuration, specify the appropriate image based on your container registry:
+
+1. [ECR Image for the Azure Function plugin](https://gallery.ecr.aws/harness/harness/azure-function-plugin)
+2. [Docker Image for the Azure Function plugin](https://hubgw.docker.com/r/harnessdev/azure-function-plugin)
+
+:::
+
 ## Azure function Deploy

-In the execution tab of the pipeline stage, select *Add Step** and select **Azure function Deploy**.
+In the execution tab of the pipeline stage, select **Add Step** and select **Azure function Deploy**.

 Currently Azure function deployment supports basic and custom deployment strategy.

@@ -111,7 +119,7 @@ The Azure function Deploy step has the following settings:
 * **Name:** Enter a name for the step.
 * **Timeout:** Enter a minimum of **10m**. The slot deployment relies on Azure and can take time.
 * **Azure Connector**: Specify the connector that connects to the azure infrastructure.
-  * **Image**: Specify the artifact image you want to deploy.
+  * **Image**: Specify the artifact image you want to run.
 * **Function app**: Specify the Azure Container to be used.
 * **Deployment Slot:** Enter the name of the Source slot for the deployment. This slot is where Harness deploys the new Azure Function version. Make sure the slot you enter is running.

@@ -148,4 +156,10 @@ Additionally you can have optional configurations such as
 * **Limit Memory:** Defines the maximum memory that can be allocated to the container or function during execution.
 * **Limit CPU:** Sets a limit on the CPU usage for the function or container, ensuring the function does not consume excessive resources.

-![](static/azure-functions-7.png) \ No newline at end of file
+![](static/azure-functions-7.png)
+
+## Azure Function Deployment Sample
+
+To see an example of how to deploy Azure Functions using Harness, visit the [Harness Community Repository](https://github.com/harness-community/harnesscd-example-apps/tree/master/azure-function-deployment).
+
+This repository provides a ready-to-use sample application and the necessary configuration files to help you get started quickly.
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/google-cloud-run.md b/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/google-cloud-run.md
index da4f7397a6d..a4642335683 100644
--- a/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/google-cloud-run.md
+++ b/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/google-cloud-run.md
@@ -9,105 +9,187 @@ redirect_from:
 # Google Cloud Run Deployments

 :::note
-
 Currently, this feature is behind the feature flag `CDS_GOOGLE_CLOUD_RUN`. Contact [Harness Support](mailto:support@harness.io) to enable the feature.
+:::
+
+This guide explains how to deploy artifacts to Google Cloud Run using Harness.
+
+## Overview
+
+Harness supports deploying both **Google Cloud Run Services** and **Google Cloud Run Jobs**.
+
+- A **Google Cloud Run Service** is a stateless containerized application that scales automatically based on traffic. It is ideal for APIs, websites, and event-driven backends.
+- A **Google Cloud Run Job** is designed for task-based execution and runs to completion, making it ideal for batch processing, scheduled tasks, or background workloads.
+
+Follow these steps to set up a **Google Cloud Run Service** in Harness:
+
+## Create a CD Pipeline
+
+1. In the Harness UI, create a new CD pipeline.
+2. Add a Deploy stage and select **Google Cloud Run** as the deployment type.
+3. Click **Set Up Stage**.
+![](static/google-cloud-run-1.png)
+
+## Configure the Service
+
+:::note
+Only Google Artifact Registry and Docker Registry are supported as artifact repositories.
 :::

-This topic explains how to deploy an artifact to Google Cloud Run using Harness.
-
-## Deployment Summary
-
-Here's a high-level summary of the setup steps:
-
-1. Create a Harness CD pipeline.
-2. Add a Deploy stage.
-3. Select the deployment type **Google Cloud Run**, and then select **Set Up Stage**.
-   ![](static/google-cloud-run-1.png)
-4. Select **Add Service**.
-   1. Add the **Google Cloud Run Service Manifest Definition** to the new Cloud Run service. Manifest Definition must define stateless, containerized applications that are compatible with Google Cloud Run
-
-      Currently, we support only two Artifact Repository types: Google Artifact Repository and Docker Registry.
-
-   2. Save the new service.
-5.
Select **New Environment**, name the new environment, and select **Save**. -6. In **Infrastructure Definition**, select **New Infrastructure**. - - In Google Cloud Provider Details, create or select the Harness GCP connector, **GCP project**, and **GCP region**, and select **Save**. - - - The user can provide a GCP Connector to authenticate with the Google Account. - - **GCP Authentication Support**: - - Google OIDC Support. Users can create an OIDC connector to connect to the GCP account. For more information, go to [Use OpenID Connect (OIDC) Connector](https://developer.harness.io/docs/platform/connectors/cloud-providers/ref-cloud-providers/gcs-connector-settings-reference/#use-openid-connect-oidc) - - Service Account. Users can create a GCP connector by providing a Service Account. For more information, go to [Create a GCP connector](/docs/platform/connectors/cloud-providers/connect-to-google-cloud-platform-gcp/) - - Users can provide a project ID to specify which project. - - Users can provide a region to specify the region they want to deploy the Cloud Run service. - -7. In the **Execution** tab, select the deployment strategy. Currently, Harness supports the **Basic** and **Canary** deployment strategies - - Harness automatically adds the **Deploy Cloud Run Step Group** based on the strategy you select: - - For **Basic Strategy**, the following steps are automatically added: - 1. Download Manifest - 2. Google Cloud Run Prepare Rollback - 3. Google Cloud Run Deploy Step - - ![](static/google-cloud-run-2.png) - - Additionally, you can add Google Cloud Run Traffic Shift Step if required. - - - For **Canary Strategy**, the following steps are automatically added: - 1. Download Manifest - 2. Google Cloud Run Prepare Rollback - 3. Google Cloud Run Deploy Step - 4. Google Cloud Run Traffic Shift Step - - ![](static/google-cloud-run-3.png) - - :::note - In the deploy steps under Container Configuration, use the public Docker image: - [`harness/google-cloud-run-plugin:1.0.0-linux-amd64`](https://hub.docker.com/layers/harness/google-cloud-run-plugin/1.0.0-linux-amd64/images/sha256-2ad0c6d434673e56df47f1014c397d2bbc8248f8e76b5bbd48965f116f4843f2?context=explore). This image is required to perform deployments to Google Cloud Run. - ::: - - All the configurations needed for the steps to run are passed as environment variables: - 1. **Download Manifest**: - - Downloads the manifest specified in the service. - 2. **Google Cloud Run Prepare Rollback**: - - Fetches the current configurations of the deployment and saves it. These details can be used during the rollback step. - - The Prepare Rollback step uses the `gcloud run services describe` command to fetch the details of the service. For more information, go to [Google Cloud Run Documentation](https://cloud.google.com/sdk/gcloud/reference/run/services/replace). - 3. **Google Cloud Run Deploy Step**: - - Applies the configurations YAML file and creates a new service or revision. - - The Deploy Step uses the `gcloud run services replace` command to apply the YAML manifest provided by the user. For more information, go to [Google Cloud Run Documentation](https://cloud.google.com/sdk/gcloud/reference/run/services/replace). - - Updates the container image field in the YAML manifest with the image provided in the service artifact, ensuring that the manifest reflects the correct configuration. - - You can provide additional command options in the **Optional Configuration** under **Google Cloud Run Services Replace Command Options**. 
- - Harness fetches the instance details configured by Google platform using the **Google Cloud Monitoring SDK** and saves them. These details can be viewed in the logs while running this step. For more information, go to [Google Cloud Run Documentation](https://cloud.google.com/monitoring/custom-metrics/reading-metrics). - - **Note**: We do not use the Google Run Deploy command here as this command takes every field as a command flag. Instead, we use the replace command and replace all the configurations provided in the YAML file. - 4. **Google Cloud Run Traffic Shift Step**: - - Controls traffic distribution across different revisions. - - The Traffic Shift Step leverages the `gcloud run services update-traffic` command to manage traffic distribution between service revisions. For more information, go to [Google Cloud Run Documentation](https://cloud.google.com/sdk/gcloud/reference/run/services/update-traffic). - - Users can specify the percentage of traffic each revision should serve under **Revision Traffic Details**. - - Additionally, you can have **Optional Configurations** such as: - - - **Pre-Execution Command**: Run commands before deployment. - - **Image Pull Policy**: Specifies when the container image should be pulled from the registry. - - **Run as User**: Configures the user identity under which the function or container should run, useful for security and access control. - - **Limit Memory**: Defines the maximum memory that can be allocated to the container or function during execution. - - **Limit CPU**: Sets a limit on the CPU usage for the function or container, ensuring the function does not consume excessive resources. - - **Environment Variables**: Additional environment variables can be configured. - -8. Define the rollback step: - - **Rollback Step**: - - Reverts services to previous configurations or states. - - If it’s the first deployment, the service is deleted using the `gcloud run services delete` command. - - For subsequent deployments, traffic is redirected to older revisions using the `gcloud run services update-traffic` command. - - **Note**: Google Cloud Run does not allow you to delete the new revision. Only the traffic is diverted to the previous revisions. - -10. Select **Save**, and then run the pipeline. +1. Select **Add Service** and add the **Google Cloud Run Service Manifest Definition**. This defines the containerized application compatible with Google Cloud Run. +2. Save the new service. -## Cloud Run Permission Requirements +## Configure the Environment and Infrastructure + +1. Select **New Environment**, provide a name for the environment, and click **Save**. +2. In the **Infrastructure Definition** section, click **New Infrastructure** and configure the following details: + + - **Harness GCP Connector**: Create or select a Harness GCP connector to authenticate with your Google Account. + - **GCP Project**: Specify the project ID to define the target GCP project. + - **GCP Region**: Select the region where the Cloud Run service will be deployed. + +3. Save the infrastructure configuration. + +**GCP Authentication Support** + +Harness supports the following methods for GCP authentication: -When you set up a Harness GCP connector to connect Harness with your GCP account, the GCP IAM user or service account must have the appropriate permissions assigned to their account. +- **Google OIDC Support**: + Create an OIDC connector to connect to the GCP account. 
For more details, refer to [Use OpenID Connect (OIDC) Connector](https://developer.harness.io/docs/platform/connectors/cloud-providers/ref-cloud-providers/gcs-connector-settings-reference/#use-openid-connect-oidc). + +- **Service Account**: + Create a GCP connector by providing a Service Account. For more details, refer to [Create a GCP connector](/docs/platform/connectors/cloud-providers/connect-to-google-cloud-platform-gcp). + + +## Configure the Execution + +In the **Execution** tab, select the deployment strategy. Currently, Harness supports the **Basic** and **Canary** deployment strategies for Google Cloud Run Service. + +Harness automatically adds the **Deploy Cloud Run Step Group** based on the strategy you select. + +The **Basic Strategy** includes the following steps: +1. **Download Manifest** +2. **Google Cloud Run Prepare Rollback** +3. **Google Cloud Run Deploy Step** + + ![](static/google-cloud-run-2.png) + +Optionally, you can add a **Google Cloud Run Traffic Shift Step**. + +The **Canary Strategy** includes the following steps: +1. **Download Manifest** +2. **Google Cloud Run Prepare Rollback** +3. **Google Cloud Run Deploy Step** +4. **Google Cloud Run Traffic Shift Step** + +![](static/google-cloud-run-3.png) + + +### Download Manifest +- **Purpose**: Downloads the manifest specified in the service. +- **Details**: The manifest contains all the configuration details necessary for deploying the service. + +### Google Cloud Run Prepare Rollback +- **Purpose**: Fetches and saves the current deployment configurations for potential rollback scenarios. +- **Details**: + - Uses the `gcloud run services describe` command to retrieve service details. + - Saves configuration details for use during rollback if required. + - For more information, see the [Google Cloud Run Documentation](https://cloud.google.com/sdk/gcloud/reference/run/services/describe). + +### Google Cloud Run Deploy Step +- **Purpose**: Applies the YAML manifest to create a new service or revision. +- **Details**: + - Executes the `gcloud run services replace` command to deploy the service. + - Updates the container image in the YAML manifest to match the specified artifact. + - Harness fetches and logs instance details using the **Google Cloud Monitoring SDK**. + - Additional configuration options can be specified under **Google Cloud Run Services Replace Command Options**. + - For more information, see the [Google Cloud Run Documentation](https://cloud.google.com/sdk/gcloud/reference/run/services/replace). + +:::note +We do not use the Google Run Deploy command here as this command takes every field as a command flag. Instead, we use the replace command and replace all the configurations provided in the YAML file. +::: + +### Google Cloud Run Traffic Shift Step +- **Purpose**: Manages traffic distribution across different revisions of the service. +- **Details**: + - Uses the `gcloud run services update-traffic` command. + - Allows users to specify the percentage of traffic each revision should handle. + - For more information, see the [Google Cloud Run Documentation](https://cloud.google.com/sdk/gcloud/reference/run/services/update-traffic). + +### Container Configuration + +For Container Registry, create or select a Docker connector to access the container registry. 
Use the following public Docker image:
+- [`harness/google-cloud-run-plugin:1.0.1-linux-amd64`](https://hub.docker.com/layers/harness/google-cloud-run-plugin/1.0.1-linux-amd64/images/sha256-bfb25c236e59041452ca81c7370a5d1ca924b361acb5309de3424ccc0645d074).
+
+This image is required to perform deployments to Google Cloud Run.
+
+#### Optional Configurations
+Harness allows for several optional configurations to customize deployment behavior:
+- **Pre-Execution Command**: Run commands before deployment.
+- **Image Pull Policy**: Specifies when to pull the container image from the registry.
+- **Run as User**: Configures the user identity for security and access control.
+- **Limit Memory**: Defines the maximum memory for the container or function.
+- **Limit CPU**: Sets a limit on CPU usage for the container or function.
+- **Environment Variables**: Additional environment variables can be configured.
+
+
+### Rollback Steps
+Harness provides rollback functionality to revert to previous configurations or states:
+- **First Deployment**: Deletes the service using the `gcloud run services delete` command.
+- **Subsequent Deployments**: Redirects traffic to older revisions using the `gcloud run services update-traffic` command.
+
+:::note
+Google Cloud Run does not allow deletion of the new revision; only traffic can be diverted to previous revisions.
+:::
+
+### Google Cloud Run Job Step
+
+You can also add a Google Cloud Run Job step in the Execution tab.
+
+![](static/google-cloud-run-job.png)
+
+**Container Configuration**
+
+For Container Registry, create or select a Docker connector to access the container registry. Use the following public Docker image:
+- [`harness/google-cloud-run-plugin:1.0.1-linux-amd64`](https://hub.docker.com/layers/harness/google-cloud-run-plugin/1.0.1-linux-amd64/images/sha256-bfb25c236e59041452ca81c7370a5d1ca924b361acb5309de3424ccc0645d074).
+
+This image is required to perform deployments to Google Cloud Run.
+
+You can define your job using either Job Name or Job Manifest.
+
+**Job Name**: Select Job Name when you already have a job defined in Google Cloud Platform and you only want to execute it.
+  - Under **Job Name**, specify the name of the job you want to run.
+  - You can also make it a Runtime Input or an Expression.
+
+**Job Manifest**: Select Job Manifest when you want to deploy a new job or update an existing job using a manifest file.
+  - Click **+ Google Cloud Run Job Manifest**.
+  - In the **Specify Google Cloud Run Job Manifest Store**, select the source where the manifest file is stored.
+  - In **Manifest Details**, specify the path where the manifest file is stored. (A minimal job manifest sketch follows the command options below.)
+
+**Deploy Step**
+
+The deploy step runs the `gcloud run jobs replace` command and then the `gcloud run jobs execute` command, in sequence.
+For more information on the `gcloud run jobs replace` command, refer to the [Google Documentation](https://cloud.google.com/sdk/gcloud/reference/run/jobs/replace?hl=en).
+For more information on the `gcloud run jobs execute` command, refer to the [Google Documentation](https://cloud.google.com/sdk/gcloud/reference/run/jobs/execute?hl=en).
+
+**Optional Configurations**
+
+You can provide additional command options (flags) in the Optional Configuration section:
+
+- **Google Cloud Run Jobs Replace Command Options**: Used for the replace function.
+- **Google Cloud Run Jobs Execute Command Options**: Used for the execute function.
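For the **Job Manifest** option, the manifest is a standard Cloud Run job definition (Cloud Run Admin API `run.googleapis.com/v1`). A minimal sketch, where the job name and image reference are placeholders for your own job and artifact:

```yaml
# Minimal Cloud Run job manifest sketch; "nightly-report" and the image
# reference are placeholders for your own job and artifact.
apiVersion: run.googleapis.com/v1
kind: Job
metadata:
  name: nightly-report
spec:
  template:          # execution template
    spec:
      taskCount: 1
      template:      # task template
        spec:
          containers:
            - image: us-docker.pkg.dev/my-project/my-repo/report:latest
          maxRetries: 3
          timeoutSeconds: 600
```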
+ +Additionally, you can configure the following options: + +- **Pre-Execution Command**: Run commands before deployment. +- **Image Pull Policy**: Specifies when the container image should be pulled from the registry. +- **Run as User**: Configures the user identity under which the function or container should run, useful for security and access control. +- **Limit Memory**: Defines the maximum memory that can be allocated to the container or function during execution. +- **Limit CPU**: Sets a limit on the CPU usage for the function or container, ensuring the function does not consume excessive resources. +- **Environment Variables**: Additional environment variables can be configured. + +## Cloud Run Permission Requirements
Cloud Functions Minimum Permissions @@ -166,3 +248,9 @@ Alternatively, the following roles can also be used: - This role grants you the necessary permissions to create, update, and delete workload identity pools and providers.
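For reference, the manifest that the **Google Cloud Run Deploy Step** applies with `gcloud run services replace` is a standard Knative-style Cloud Run service definition. A minimal sketch with placeholder names; as noted above, Harness rewrites the `image` field with the artifact configured in the Harness service:

```yaml
# Minimal Cloud Run service manifest sketch; the name and image are placeholders.
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: hello-service
spec:
  template:
    spec:
      containers:
        # Harness replaces this image with the artifact from the Harness service.
        - image: us-docker.pkg.dev/my-project/my-repo/hello:latest
          ports:
            - containerPort: 8080
```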
+
+## Google Cloud Run Sample
+
+To see an example of how to deploy a Google Cloud Run service using Harness, visit the [Harness Community Repository](https://github.com/harness-community/harnesscd-example-apps/tree/master/google-cloud-run).
+
+This repository provides a ready-to-use sample application and the necessary configuration files to help you get started quickly.
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/google-functions.md b/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/google-functions.md
index f8e16bcdbe2..a80e90c2f88 100644
--- a/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/google-functions.md
+++ b/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/google-functions.md
@@ -38,7 +38,9 @@ Harness supports the following:
 - For Google Cloud Functions 2nd gen, Harness does not support [Google Cloud Source Repository](https://cloud.google.com/functions/docs/deploy#from-source-repo) at this time. Only Google Cloud Storage is supported.
 - For Google Cloud Functions 1st gen, Harness supports both Google Cloud Storage and Google Cloud Source.
-- We support OpenID Connect (OIDC) authentication in Google Cloud Functions, enabling seamless integration with OIDC-compliant identity providers for enhanced security and user management.
+
+
+We support OpenID Connect (OIDC) authentication in Google Cloud Functions, enabling seamless integration with OIDC-compliant identity providers for enhanced security and user management.

 To configure OIDC authentication in GCP, navigate to account/project settings.
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/static/google-cloud-run-1.png b/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/static/google-cloud-run-1.png
index d63c2efd5c4..6fce6cfdf2a 100644
Binary files a/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/static/google-cloud-run-1.png and b/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/static/google-cloud-run-1.png differ
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/static/google-cloud-run-job.png b/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/static/google-cloud-run-job.png
new file mode 100644
index 00000000000..1f02bb25847
Binary files /dev/null and b/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/static/google-cloud-run-job.png differ
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/kubernetes-cd-quickstart.md b/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/kubernetes-cd-quickstart.md
index d288f02ab8e..b46321298be 100644
--- a/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/kubernetes-cd-quickstart.md
+++ b/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/kubernetes-cd-quickstart.md
@@ -237,6 +237,31 @@ If you run into any errors, it is typically because the cluster does meet the re
 Next, try using Harness [Continuous Integration](/docs/continuous-integration/get-started/tutorials) to build a codebase, upload it to a repo, and run unit and integrations tests.

+## Detailed diagnostics for K8s Deployment
+
+Harness provides detailed log information for pods and containers during the Wait For Steady State step in Kubernetes deployments, helping you troubleshoot deployment issues by providing real-time insights into pod statuses.
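These details come from the pod's `status` block, as described below. For orientation, here is a trimmed, illustrative sketch of the fields involved:

```yaml
# Trimmed, illustrative pod status; these are the fields surfaced in the logs.
status:
  conditions:
    - type: Ready
      status: "False"
      reason: ContainersNotReady
  containerStatuses:
    - name: app
      ready: false
      restartCount: 2
      state:
        waiting:
          reason: ImagePullBackOff
  initContainerStatuses:
    - name: init-db
      ready: true
      state:
        terminated:
          reason: Completed
          exitCode: 0
```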
+
+:::note
+
+Currently, this feature is behind the feature flag `CDS_K8S_DETAILED_POD_LOGS`. Contact [Harness Support](mailto:support@harness.io) to enable the feature.
+
+:::
+
+These logs include detailed information from `status.conditions`, `status.containerStatuses`, and `status.initContainerStatuses` during the Wait For Steady State step.
+
+**Key Details**:
+- Logs are polled every 30 seconds. If the task completes in less than 30 seconds, no logs will be displayed.
+- The log output visually represents the status of the pods and containers using color codes. Below is an example of how the container statuses appear in the logs.
+
+  - Red: Terminated
+    ![](./static/kubernetes-logs-3.png)
+
+  - Yellow: Waiting
+    ![](./static/kubernetes-logs.png)
+
+  - White: Running
+    ![](./static/kubernetes-logs-2.png)
+
 ## Clean up the deployment

 For steps on deleting the Delgate, go to [delete a delegate](/docs/platform/delegates/manage-delegates/delete-a-delegate).
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/static/kubernetes-logs-2.png b/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/static/kubernetes-logs-2.png
new file mode 100644
index 00000000000..7f2a4ffd3c2
Binary files /dev/null and b/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/static/kubernetes-logs-2.png differ
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/static/kubernetes-logs-3.png b/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/static/kubernetes-logs-3.png
new file mode 100644
index 00000000000..b5fe9455b69
Binary files /dev/null and b/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/static/kubernetes-logs-3.png differ
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/static/kubernetes-logs.png b/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/static/kubernetes-logs.png
new file mode 100644
index 00000000000..77dd7b45786
Binary files /dev/null and b/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/static/kubernetes-logs.png differ
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/serverless/serverless-lambda-cd-quickstart.md b/docs/continuous-delivery/deploy-srv-diff-platforms/serverless/serverless-lambda-cd-quickstart.md
index da144bc72e7..5f392161a7e 100644
--- a/docs/continuous-delivery/deploy-srv-diff-platforms/serverless/serverless-lambda-cd-quickstart.md
+++ b/docs/continuous-delivery/deploy-srv-diff-platforms/serverless/serverless-lambda-cd-quickstart.md
@@ -259,6 +259,7 @@ There are two flavours of images available first with serverless installed and o
 | python 3.12 | harness/serverless-plugin:python3.12-3.39.0-1.0.1-beta-linux-amd64 | harness/serverless-plugin:python3.12-1.0.1-beta-linux-amd64 |
 | ruby 3.2 | harness/serverless-plugin:ruby3.2-3.39.0-1.0.1-beta-linux-amd64 | harness/serverless-plugin:ruby3.2-1.0.1-beta-linux-amd64 |

+For ECR users, you can access these images via the [ECR Image Repository for Serverless Plugin](https://gallery.ecr.aws/harness/harness/serverless-plugin).

 Now, let's understand the runtime image one with serverless installed and one without serverless installed.
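If you pull the plugin from the ECR gallery instead of Docker Hub, only the image reference changes. The following is a hypothetical step fragment: the step type, the connector reference, and the `public.ecr.aws` path are assumptions, and the tag is taken from the table above; adjust all of them to your own pipeline.

```yaml
# Hypothetical fragment: referencing the ECR-hosted serverless plugin image.
# Step type, connectorRef, and the ECR path are assumptions; adjust to your pipeline.
- step:
    type: ServerlessAwsLambdaDeployV2
    name: Serverless Deploy
    identifier: serverless_deploy
    spec:
      connectorRef: your_docker_connector   # placeholder for your registry connector
      image: public.ecr.aws/harness/harness/serverless-plugin:python3.12-3.39.0-1.0.1-beta-linux-amd64
```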
diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/static/output-variable-logs.png b/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/static/output-variable-logs.png new file mode 100644 index 00000000000..9b1e063a4b4 Binary files /dev/null and b/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/static/output-variable-logs.png differ diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/static/tanzu-secrets-output-variable.png b/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/static/tanzu-secrets-output-variable.png new file mode 100644 index 00000000000..5450ac25804 Binary files /dev/null and b/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/static/tanzu-secrets-output-variable.png differ diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/tanzu-command-step.md b/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/tanzu-command-step.md index c867179d4e4..13f29b3e4fa 100644 --- a/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/tanzu-command-step.md +++ b/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/tanzu-command-step.md @@ -74,6 +74,7 @@ To export variables from the script to other steps in the stage, you use the **O The following variable types are supported: - String +- Secret Output variables are passed from the the script output to the Harness pipeline and can be referenced in subsequent steps and settings using expressions. @@ -116,6 +117,16 @@ If you exit from the script (`exit 0`), Harness does not populate the output var ::: +You can access a secret configured in the **Secrets Manager** using an expression. For example, `<+secrets.getValue('SECRET_NAME')>`. + +You can also configure variables of type Secret as output variables. When an output variable is configured as a secret, its value is encrypted. + +![](static/tanzu-secrets-output-variable.png) + +The encrypted secret is decrypted and made available for use in the script. However, the script's output will not display the secret, even if the secret is explicitly passed to the output stream. + +![](static/output-variable-logs.png) + ### Using manifests in your scripts In order to use defined manifests in the Service in Tanzu command script you have to use a special expression `${service.manifest}` diff --git a/docs/continuous-delivery/get-started/cd-tutorials/ownapp.md b/docs/continuous-delivery/get-started/cd-tutorials/ownapp.md index 3262e9cd3ed..4873d9d1f17 100644 --- a/docs/continuous-delivery/get-started/cd-tutorials/ownapp.md +++ b/docs/continuous-delivery/get-started/cd-tutorials/ownapp.md @@ -32,8 +32,6 @@ import AMDCLI from '/docs/platform/shared/cli/amd.md'; This tutorial is a continuation of the [Kubernetes Manifest tutorial](./manifest). In that tutorial, we guided you through creating a sample pipeline using the Guestbook sample app. In this tutorial, we'll walk you through deploying your own microservice app with the Harness CD pipeline or GitOps workflow. -**Sock Shop**, developed by Weaveworks, serves as a polyglot architectural pattern to showcase microservices-based deployments. This application suite integrates a range of technologies, such as SpringBoot, Go, REDIS, MYSQL, MongoDB, among others. We've chosen the Sock Shop as our demonstration app for the deployment process in Harness. - You can use the same steps to integrate and deploy your own microservice app. :::info @@ -45,317 +43,227 @@ You can use the same steps to integrate and deploy your own microservice app. 
You can choose to proceed with the tutorial either by using the command-line interface (Harness CLI) or the user interface (Harness UI). - + -## Before you begin \{#before-you-begin-gitops} + + -Verify that you have the following: +## Before you begin \{#before-you-begin-ui} -1. **A Kubernetes cluster**. We recommend [K3D](https://k3d.io/v5.5.1/) for installing the Harness GitOps Agent and deploying a sample application in a local development environment. - - For requirements, go to [Harness GitOps Agent Requirements](/docs/continuous-delivery/gitops/connect-and-manage/install-a-harness-git-ops-agent#requirements). -2. **Fork the [harnesscd-example-apps](https://github.com/harness-community/harnesscd-example-apps/fork)** repository using the GitHub web interface to utilize the Harness resource YAMLs. +Verify that you have the following: -## Getting Started with Harness GitOps +1. **Obtain GitHub personal access token with the repo scope**. See the GitHub documentation on [creating a personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line). +2. **A Kubernetes cluster**. Use your own Kubernetes cluster or we recommend using [K3D](https://k3d.io/v5.5.1/) for installing Harness Delegates and deploying a sample application in a local development environment. + - Check [Delegate system requirements](/docs/platform/delegates/delegate-concepts/delegate-requirements). +3. **Install the [Helm CLI](https://helm.sh/docs/intro/install/)** in order to install the Harness Helm delegate. +4. **Fork the [harnesscd-example-apps](https://github.com/harness-community/harnesscd-example-apps/fork)** repository using the GitHub web interface to utilize the Harness resource YAMLs. - - +## Getting Started with Harness CD \{#getting-started-harness-cd-ui} -1. Login to [Harness](https://app.harness.io/). +1. Log in to [Harness](https://app.harness.io/). 2. Select **Projects**, and then select **Default Project**. -3. Select **Deployments**, and then select **GitOps**. -### GitOps Agent +### Delegate -1. You have the option to use the same agent that you deployed during the Manifest tutorial or to deploy a new agent by following the steps below. However, remember to use a newly created agent identifier when creating repositories and clusters. - - Select **Settings**, and then select **GitOps Agents**. - - Select **New GitOps Agent**. - - When are prompted with **Do you have any existing Argo CD instances?**, select **Yes** if you already have a Argo CD Instance, or else choose **No** to install the **Harness GitOps Agent**. +3. You have the option to use the same delegate that you deployed during the Manifest tutorial or to deploy a new delegate by following the steps below. However, remember to use a newly created delegate identifier when creating connectors. - - +- Log in to the [Harness UI](https://app.harness.io/). In **Project Setup**, select **Delegates**. -- Select **No**, and then select **Start**. -- In **Name**, enter the name for the new Agent `ownappagent` -- In **Namespace**, enter the namespace where you want to install the Harness GitOps Agent. Typically, this is the target namespace for your deployment. - - For this tutorial, let's use the `default` namespace to install the Agent and deploy applications. -- Select **Continue**. The **Review YAML** settings appear. -- This is the manifest YAML for the Harness GitOps Agent. You will download this YAML file and run it in your Harness GitOps Agent cluster. 
+ - Select **Delegates**. - ``` - kubectl apply -f gitops-agent.yml -n default - ``` + - Select **Install delegate**. For this tutorial, let's explore how to install the delegate using Helm. + - Add the Harness Helm chart repo to your local Helm registry. -- Select **Continue** and verify the Agent is successfully installed and can connect to Harness Manager. + ```bash + helm repo add harness-delegate https://app.harness.io/storage/harness-download/delegate-helm-chart/ + ``` - - + ```bash + helm repo update harness-delegate + ``` -- Select **Yes**, and then select **Start**. -- In **Name**, enter the name for the existing Argo CD project. -- In **Namespace**, enter the namespace where you want to install the Harness GitOps Agent. Typically, this is the target namespace for your deployment. -- Select **Next**. The **Review YAML** settings appear. -- This is the manifest YAML for the Harness GitOps Agent. You will download this YAML file and run it in your Harness GitOps Agent cluster. + - In the command provided, `ACCOUNT_ID`, `MANAGER_ENDPOINT`, and `DELEGATE_TOKEN` are auto-populated values that you can obtain from the delegate Installation wizard. - ```yaml - kubectl apply -f gitops-agent.yml -n default - ``` + ```bash + helm upgrade -i helm-delegate --namespace harness-delegate-ng --create-namespace \ + harness-delegate/harness-delegate-ng \ + --set delegateName=helm-delegate \ + --set accountId=ACCOUNT_ID \ + --set managerEndpoint=MANAGER_ENDPOINT \ + --set delegateDockerImage=harness/delegate:24.12.84702 \ + --set replicas=1 --set upgrader.enabled=true \ + --set delegateToken=DELEGATE_TOKEN + ``` -- Once you have installed the Agent, Harness will start importing all the entities from the existing Argo CD Project. + - Verify that the delegate is installed successfully and can connect to the Harness Manager. + - You can also follow the [Install Harness Delegate on Kubernetes or Docker](/docs/platform/get-started/tutorials/install-delegate) steps to install the delegate using the Terraform Helm Provider or Kubernetes manifest. - - +:::warning -### Repositories +If you plan to use your own Project, Organization, and custom names for Harness resources, please update the resource YAMLs accordingly with these details. -1. Select **Settings**, and then select **Repositories**. - - Select **New Repository**. - - Choose **Git**. - - Enter a name in **Repository**: `ownapp_repo`. - - In **GitOps Agent**, select the Agent that you installed in your cluster and select **Apply**. - - In **Git Repository URL**, paste `https://github.com/microservices-demo/microservices-demo`. - - Select **Continue** and choose **Specify Credentials For Repository**. - - Select **HTTPS** as the **Connection Type**. - - Select **Anonymous (no credentials required)** as the **Authentication** method. - - Select **Save & Continue** and wait for Harness to verify the connection. - - Finally, select **Finish**. +::: -### Clusters +### Secrets -1. Select **Settings**, and then select **Clusters**. - - Select **New Cluster**. - - In **Name**, enter a name for the cluster: `ownnapp_cluster`. - - In **GitOps Agent**, select the Agent you installed in your cluster, and then select **Apply**. - - Select **Continue** and select **Use the credentials of a specific Harness GitOps Agent**. - - Select **Save & Continue** and wait for the Harness to verify the connection. - - Finally, select **Finish**. +4. Under **Project Setup**, select **Secrets**. + - Select **New Secret**, and then select **Text**. 
+ - Enter the secret name `ownappgitpat`. + - For the secret value, paste the GitHub personal access token you saved earlier. + - Select **Save**. -### Applications +### Connectors -1. Select **Applications**. +5. Create the **GitHub connector**. + - Copy the contents of [github-connector.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/github-connector.yml). + - In your Harness project in the Harness Manager, under **Project Setup**, select **Connectors**. + - Select **Create via YAML Builder** and paste the copied YAML. + - Assuming you have already forked the [harnesscd-example-apps](https://github.com/harness-community/harnesscd-example-apps/fork) repository mentioned earlier, replace **GITHUB_USERNAME** with your GitHub account username in the YAML. + - In `projectIdentifier`, verify that the project identifier is correct. You can see the Id in the browser URL (after `account`). If it is incorrect, the Harness YAML editor will suggest the correct Id. + - Select **Save Changes** and verify that the new connector named **ownapp_gitconnector** is successfully created. + - Finally, select **Connection Test** under **Connectivity Status** to ensure the connection is successful. +6. Create the **Kubernetes connector**. + - Copy the contents of [kubernetes-connector.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/kubernetes-connector.yml). + - In your Harness project, under **Project Setup**, select **Connectors**. + - Select **Create via YAML Builder** and and paste the copied YAML. + - Replace **DELEGATE_NAME** with the installed Delegate name. To obtain the Delegate name, navigate to **Project Setup**, and then **Delegates**. + - Select **Save Changes** and verify that the new connector named **ownapp_k8sconnector** is successfully created. + - Finally, select **Connection Test** under **Connectivity Status** to verify the connection is successful. - - Select **New Application**. +### Environment - - Enter the **Application Name**: `sockshop`. - - In **GitOps Agent**, select the Agent that you installed in your cluster and select **Apply**. - - Select **New Service**, and then toggle to **YAML** to use the YAML editor. - - Select **Edit YAML**, paste in the YAML below, and then select **Save**. +7. In your Harness project, select **Environments**. + - Select **New Environment**, and then select **YAML**. + - Copy the contents of [environment.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/environment.yml), paste it into the YAML editor, and select **Save**. + - In your new environment, select the **Infrastructure Definitions** tab. + - Select **Infrastructure Definition**, and then select **YAML**. + - Copy the contents of [infrastructure-definition.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/infrastructure-definition.yml) and paste it into the YAML editor. + - Select **Save** and verify that the environment and infrastructure definition are created successfully. - ```yaml - service: - name: ownapp_service - identifier: ownappservice - serviceDefinition: - type: Kubernetes - spec: {} - gitOpsEnabled: true - ``` +### Services - - Select **New Environment**, and the toggle to **YAML** to use the YAML editor. - - Select **Edit YAML**, paste in the YAML below, and then select **Save**. +8. In your Harness project, select **Services**. + - Select **New Service**. 
+ - Enter the name `ownappservice`. + - Select **Save**, and then **YAML** (on the **Configuration** tab). + - Select **Edit YAML**, copy the contents of [service.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/service.yml), and paste it into the YAML editor. + - Select **Save**, and verify that the service **ownapp_service** is successfully created. - ```yaml - environment: - name: ownapp_env - identifier: ownappenv - description: "" - tags: {} - type: PreProduction - orgIdentifier: default - projectIdentifier: default_project - variables: [] - ``` +### Pipeline - - Next, select **Continue**, keep the **Sync Policy** settings as is, and select **Continue**. - - In **Repository URL**, select the **Repository** you created earlier, and then select **Apply**. - - Select **master** as the **Target Revision**, type `deploy/kubernetes` in the **Path**, and then select **Enter**. - - Select **Continue** and select the **Cluster** created in the above steps. - - In **Namespace**, enter the target namespace for Harness GitOps to sync the application. - - Enter `default` and select **Finish**. + + -2. Finally, it's time to **Synchronize** the GitOps Application state. Select **Sync**, check the Application details, and then select **Synchronize** to initiate the deployment. +9. In **Default Project**, select **Pipelines**. + - Select **New Pipeline**. + - Enter the name `ownapp_canary_pipeline`. + - Select **Inline** to store the pipeline in Harness. + - Select **Start** and, in the Pipeline Studio, toggle to **YAML** to use the YAML editor. + - Select **Edit YAML** to enable edit mode. + - Copy the contents of [canary-pipeline.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/canary-pipeline.yml). + - In your Harness pipeline YAML editor, paste the YAML. + - Select **Save**. - - After a successful execution, you can check the deployment on your Kubernetes cluster using the following command: + + - ```bash - kubectl get pods -n sock-shop - ``` +9. In **Default Project**, select **Pipelines**. + - Select **New Pipeline**. + - Enter the name `ownapp_bluegreen_pipeline`. + - Select **Inline** to store the pipeline in Harness. + - Select **Start** and, in the Pipeline Studio, toggle to **YAML** to use the YAML editor. + - Select **Edit YAML** to enable edit mode. + - Copy the contents of [bluegreen-pipeline.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/bluegreen-pipeline.yml). + - In your Harness pipeline YAML editor, paste the YAML. + - Select **Save**. - - Sock Shop is accessible via the master and any of the node urls on port `30001`. + + -A successful Application sync will display the following status tree under **Resource View**. +9. In **Default Project**, select **Pipelines**. + - Select **New Pipeline**. + - Enter the name `ownapp_rolling_pipeline`. + - Select **Inline** to store the pipeline in Harness. + - Select **Start** and, in the Pipeline Studio, toggle to **YAML** to use the YAML editor. + - Select **Edit YAML** to enable edit mode. + - Copy the contents of [rolling-pipeline.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/rolling-pipeline.yml). + - In your Harness pipeline YAML editor, paste the YAML. + - Select **Save**. 
- + -Harness offers a [Terraform Provider](https://registry.terraform.io/providers/harness/harness/latest/docs) to help you declaratively manage Harness GitOps entities alongside your application and cluster resources. These steps walk through using Terraform to create and install the GitOps agent, define related Harness entities, and deploy a sample application to your cluster. + + - +## Before you begin \{#before-you-begin-cli} -Before proceeding: +:::info -1. Generate a [Harness API token](/docs/platform/automation/api/add-and-manage-api-keys/#create-personal-api-keys-and-tokens). -1. Make sure [Terraform](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli) is installed on a computer that can connect to your cluster. +If you have already followed the steps from the Kubernetes tutorials and met all the requirements, then you can skip this. -### Harness Terraform Provider +::: -1. Clone or download the Harness [gitops-terraform-onboarding](https://github.com/harness-community/gitops-terraform-onboarding) project. +Verify the following: -``` -git clone https://github.com/harness-community/harnesscd-example-apps.git -cd deploy-own-app/gitops/terraform -``` +1. **Obtain Harness API Token**. For steps, go to the Harness documentation on [creating a personal API token](/docs/platform/automation/api/add-and-manage-api-keys/). +2. **Obtain GitHub personal access token with repo permissions**. For steps, go to the GitHub documentation on [creating a personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line). +3. **A Kubernetes cluster**. Use your own Kubernetes cluster or we recommend using [K3D](https://k3d.io/v5.5.1/) for installing Harness Delegates and deploying a sample application in a local development environment. + - Check [delegate system and network requirements](/docs/platform/delegates/delegate-concepts/delegate-requirements). +4. **Install the [Helm CLI](https://helm.sh/docs/intro/install/)**. +5. **Fork the [harnesscd-example-apps](https://github.com/harness-community/harnesscd-example-apps/fork)** repository using the GitHub web interface to utilize the Harness resource YAMLs. -2. Initialize the Terraform configuration. This step will also install the Harness provider plugin. +## Getting Started with Harness CD -```bash -terraform init -``` +1. Download and Configure Harness CLI. -
-What is a Terraform Provider? + + + + -A Terraform Provider is a plugin that allows Terraform to define and manage resources using a particular software API. In this tutorial these resources will be Harness entities. + + -
+ + + + -### Input variables + + -1. Open **terraform.tfvars**. This file contains example values for the Harness entities that will be created. - -```file -project_id = "default_project" -org_id = "default" -agent_identifier = "ownappagent" -agent_name = "ownappagent" -agent_namespace = "default" -repo_identifier = "ownapprepo" -repo_name = "ownapprepo" -repo_url = "https://github.com/microservices-demo/microservices-demo/" -cluster_identifier = "ownappcluster" -cluster_name = "ownappcluster" -env_name = "ownappenv" -service_name = "ownappservice" -``` - -2. In **terraform.tfvars**, change the value of **repo_url** to _https://github.com/microservices-demo/microservices-demo/_ repository or to your own app repo. - - - You are welcome to keep the other variable values as they are or rename them to suit your environment. - -3. Set **account_id** and **harness_api_token** as Terraform environment variables. Your Account ID can be found in the URL after account/ when you are logged into app.harness.io. - -``` -export TV_VAR_account_id="123abcXXXXXXXX" -export TV_VAR_harness_api_token="pat.abc123xxxxxxxxxx…" -``` - -:::warning - -Never store your Harness API Key in a plain text configuration file or in version control. Use an environment variable or dedicated secrets manager. - -::: - -### Terraform module - -
-What is a Terraform module? - -A Terraform module is a collection of files that define the desired state to be enforced by Terraform. These files normally have the .tf extension. - -
- -1. Open **agent.tf**. This file defines the GitOps agent in Harness and then deploys the agent manifest to your cluster. The agent is created using the harness_gitops_platform_agent resource. - -```json -resource "harness_platform_gitops_agent" "gitops_agent" { - identifier = var.agent_identifier - account_id = var.account_id - project_id = var.project_id - org_id = var.org_id - name = var.agent_name - type = "MANAGED_ARGO_PROVIDER" - metadata { - namespace = var.agent_namespace - high_availability = false - } -} -``` - -If you have an _existing_ Argo CD instance, change the type argument to CONNECTED_ARGO_PROVIDER. Otherwise leave as is. - -2. If you’ve made changes to any configuration files, verify the syntax is still valid. - -```bash -terraform validate -``` - -3. Preview the changes Terraform will make in Harness and your cluster. - -```bash -terraform plan -``` - -4. Apply the Terraform configuration to create the Harness and cluster resources. Type **yes** to confirm when prompted. - -```bash -terraform apply -``` - -Observe the output of `terraform apply` as your resources are created. It may take a few minutes for all the resources to be provisioned. - -### Verify GitOps deployment - -1. Log into [https://app.harness.io](https://app.harness.io). Select **Deployments**, then **GitOps**. - - - Select **Settings**, and then select **GitOps Agents** - - Verify your GitOps agent is listed and displays a HEALTHY health status. - -2. Navigate back to **Settings**, and then select **Repositories**. - - - Verify your **harnesscd-example-apps** repo is listed with Active connectivity status. - -3. Navigate back to **Settings**, and then select **Clusters**. - - - Verify you cluster with its associated GitOps agent is listed with Active connectivity status. - -4. Select **Application** from the top right of the page. + - - Click into the **sockshop** application. This is the application deployed from the **microservices-demo/microservices-demo/** repo. - - Select **Resource View** to see the cluster resources that have been deployed. A successful Application sync will display the following status tree. +
+
-5. Return to a local command line. Confirm you can see the GitOps agent and guestbook application resources in your cluster. +
+ -``` -kubectl get deployment -n default -kubectl get svc -n default -kubectl get pods -n default -``` + a. Open Windows Powershell and run the command below to download the Harness CLI. -6. To access the Sockshop application deployed via the Harness GitOps, you can check the deployment on your Kubernetes cluster using the following command: - `bash - kubectl get pods -n sock-shop - ` - - Sock Shop is accessible via the master and any of the node urls on port `30001`. + -### Cleaning up + b. Extract the downloaded zip file and change directory to extracted file location. -1. If you know longer need the resources created in this tutorial, run the following command to delete the GitOps agent and associated Harness entities. + c. Follow the steps below to make it accessible via terminal. -``` -terraform destroy -``` + ``` + $currentPath = Get-Location + [Environment]::SetEnvironmentVariable("PATH", "$env:PATH;$currentPath", [EnvironmentVariableTarget]::Machine) + ``` -**Note:** Since deleting the Sockshop application in Harness does not delete the deployed cluster resources themselves, you’ll need to manually remove the Kubernetes deployment. + d. Restart terminal. - - -1. Refer [Install and Configure Harness CLI](https://developer.harness.io/docs/platform/automation/cli/install) doc to setup and configure Harness CLI. +
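+
+   e. Optionally, verify that the CLI is reachable from a new terminal session. This is a quick sanity check; it assumes the extracted binary is named `harness` and supports the common `--version` flag:
+
+      ```
+      harness --version
+      ```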
-2. Clone the Forked **harnesscd-example-apps** repo and change directory. +2. Clone the forked **harnesscd-example-apps** repo and change directory. ```bash git clone https://github.com/GITHUB_ACCOUNTNAME/harnesscd-example-apps.git - cd harnesscd-example-apps/deploy-own-app/gitops + cd harnesscd-example-apps/deploy-own-app/cd-pipeline ``` :::note @@ -364,122 +272,21 @@ terraform destroy ::: -3. You have the option to use the same agent that you deployed during the Manifest tutorial or to deploy a new agent by following the steps below. However, remember to use a newly created agent identifier when creating repositories and clusters. - - Select **Settings**, and then select **GitOps Agents**. - - Select **New GitOps Agent**. - - When you are prompted with **Do you have any existing Argo CD instances?**, select **Yes** if you already have a Argo CD Instance, or else choose **No** to install the **Harness GitOps Agent**. - - - - -- Select **No**, and then select **Start**. -- In **Name**, enter the name for the new Agent `ownappagent` -- In **Namespace**, enter the namespace where you want to install the Harness GitOps Agent. Typically, this is the target namespace for your deployment. - - For this tutorial, let's use the `default` namespace to install the Agent and deploy applications. -- Select **Continue**. The **Review YAML** settings appear. -- This is the manifest YAML for the Harness GitOps Agent. You will download this YAML file and run it in your Harness GitOps Agent cluster. - - ``` - kubectl apply -f gitops-agent.yml -n default - ``` - -- Select **Continue** and verify the Agent is successfully installed and can connect to Harness Manager. - - - - -- Select **Yes**, and then select **Start**. -- In **Name**, enter the name for the existing Argo CD project. -- In **Namespace**, enter the namespace where you want to install the Harness GitOps Agent. Typically, this is the target namespace for your deployment. -- Select **Next**. The **Review YAML** settings appear. -- This is the manifest YAML for the Harness GitOps Agent. You will download this YAML file and run it in your Harness GitOps Agent cluster. - - ```yaml - kubectl apply -f gitops-agent.yml -n default - ``` - -- Once you have installed the Agent, Harness will start importing all the entities from the existing Argo CD Project. - - - - -4. Before proceeding, store the Agent Identifier value as an environment variable for use in the subsequent commands: - - ```bash - export AGENT_NAME=GITOPS_AGENT_IDENTIFIER - ``` - - > Note: Replace `GITOPS_AGENT_IDENTIFIER` with GitOps Agent Identifier. - -5. Create a **GitOps Repository**. - - ```bash - harness gitops-repository --file deploy-own-app/gitops/repository.yml apply --agent-identifier $AGENT_NAME - ``` - - > If you intend to use a private Git repository that hosts your manifest files, create a Harness secret containing the Git personal access token (PAT). Subsequently, create a new GitOps Repository pointing to your private repo. - -6. Create a **GitOps Cluster**. - - ```bash - harness gitops-cluster --file deploy-own-app/gitops/cluster.yml apply --agent-identifier $AGENT_NAME - ``` - -7. Create a **GitOps Application**. +3. Log in to Harness from the CLI. ```bash - harness gitops-application --file deploy-own-app/gitops/application.yml apply --agent-identifier $AGENT_NAME + harness login --api-key HARNESS_API_TOKEN --account-id HARNESS_ACCOUNT_ID ``` - > To deploy your own app, modify `repoURL` and `path` in the application.yml. - -8. 
At last, it's time to synchronize the application with your Kubernetes setup. - -- Navigate to Harness UI > Default Project > GitOps > Applications, then click on gitops-application. Choose Sync, followed by Synchronize to kick off the application deployment. - - - Observe the Sync state as Harness synchronizes the workload under `Resource View` tab. - - - After a successful execution, you can check the deployment on your Kubernetes cluster using the following command: - - ```bash - kubectl get pods -n sock-shop - ``` - - - Sock Shop is accessible via the master and any of the node urls on port `30001`. - -
-
- -### Congratulations!🎉 - -You've just learned how to use **Harness GitOps** to deploy an application using a Kubernetes manifest. - -Keep learning about Harness GitOps. Create a GitOps ApplicationSet and PR Pipeline in Harness GitOps by following this [guide](/docs/continuous-delivery/gitops/applicationsets/harness-git-ops-application-set-tutorial). - -
- - - - - -## Before you begin \{#before-you-begin-ui} - -Verify that you have the following: - -1. **Obtain GitHub personal access token with the repo scope**. See the GitHub documentation on [creating a personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line). -2. **A Kubernetes cluster**. Use your own Kubernetes cluster or we recommend using [K3D](https://k3d.io/v5.5.1/) for installing Harness Delegates and deploying a sample application in a local development environment. - - Check [Delegate system requirements](/docs/platform/delegates/delegate-concepts/delegate-requirements). -3. **Install the [Helm CLI](https://helm.sh/docs/intro/install/)** in order to install the Harness Helm delegate. -4. **Fork the [harnesscd-example-apps](https://github.com/harness-community/harnesscd-example-apps/fork)** repository using the GitHub web interface to utilize the Harness resource YAMLs. + :::note -## Getting Started with Harness CD \{#getting-started-harness-cd-ui} + Replace `HARNESS_API_TOKEN` with Harness API Token that you obtained during the prerequisite section of this tutorial, and HARNESS_ACCOUNT_ID with your Harness account ID (find in the URL when logged into https://app.harness.io). -1. Log in to [Harness](https://app.harness.io/). -2. Select **Projects**, and then select **Default Project**. + ::: ### Delegate -3. You have the option to use the same delegate that you deployed during the Manifest tutorial or to deploy a new delegate by following the steps below. However, remember to use a newly created delegate identifier when creating connectors. +4. You have the option to use the same delegate that you deployed during the Manifest tutorial or to deploy a new delegate by following the steps below. However, remember to use a newly created delegate identifier when creating connectors. - Log in to the [Harness UI](https://app.harness.io/). In **Project Setup**, select **Delegates**. @@ -518,341 +325,532 @@ If you plan to use your own Project, Organization, and custom names for Harness ::: -### Secrets +#### Secrets -4. Under **Project Setup**, select **Secrets**. - - Select **New Secret**, and then select **Text**. - - Enter the secret name `ownappgitpat`. - - For the secret value, paste the GitHub personal access token you saved earlier. - - Select **Save**. +5. If you intend to use a private Git repository that hosts your manifest files, create a Harness secret containing the Git personal access token (PAT) with name `ownappgitpat`. Subsequently, create a new Git connector using this secret. + - Under **Project Setup**, select **Secrets**. + - Select **New Secret**, and then select **Text**. + - Enter the secret name `ownappgitpat`. + - For the secret value, paste the GitHub personal access token you saved earlier. + - Select **Save**. -### Connectors +#### Connectors -5. Create the **GitHub connector**. - - Copy the contents of [github-connector.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/github-connector.yml). - - In your Harness project in the Harness Manager, under **Project Setup**, select **Connectors**. - - Select **Create via YAML Builder** and paste the copied YAML. - - Assuming you have already forked the [harnesscd-example-apps](https://github.com/harness-community/harnesscd-example-apps/fork) repository mentioned earlier, replace **GITHUB_USERNAME** with your GitHub account username in the YAML. 
- - In `projectIdentifier`, verify that the project identifier is correct. You can see the Id in the browser URL (after `account`). If it is incorrect, the Harness YAML editor will suggest the correct Id. - - Select **Save Changes** and verify that the new connector named **ownapp_gitconnector** is successfully created. - - Finally, select **Connection Test** under **Connectivity Status** to ensure the connection is successful. -6. Create the **Kubernetes connector**. - - Copy the contents of [kubernetes-connector.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/kubernetes-connector.yml). - - In your Harness project, under **Project Setup**, select **Connectors**. - - Select **Create via YAML Builder** and and paste the copied YAML. - - Replace **DELEGATE_NAME** with the installed Delegate name. To obtain the Delegate name, navigate to **Project Setup**, and then **Delegates**. - - Select **Save Changes** and verify that the new connector named **ownapp_k8sconnector** is successfully created. - - Finally, select **Connection Test** under **Connectivity Status** to verify the connection is successful. +6. Replace **url** with your GitHub Repo URL that hosts your manifest files in the `github-connector.yaml`. + +7. Verify that the `projectIdentifier` and `orgIdentifier` is correct and the `tokenRef` value matches the Git PAT secret you created in the previous step. + +8. Now create the **GitHub connector** using the following CLI command: + ``` + harness connector --file github-connector.yml apply --git-user + ``` +9. Utilize the same delegate that you deployed as part of the Manifest tutorial or use the newly created delegate identifier to create the **Kubernetes connector** using the following CLI command: + + ``` + harness connector --file kubernetes-connector.yml apply --delegate-name DELEGATE_IDENTIFIER + ``` ### Environment -7. In your Harness project, select **Environments**. - - Select **New Environment**, and then select **YAML**. - - Copy the contents of [environment.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/environment.yml), paste it into the YAML editor, and select **Save**. - - In your new environment, select the **Infrastructure Definitions** tab. - - Select **Infrastructure Definition**, and then select **YAML**. - - Copy the contents of [infrastructure-definition.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/infrastructure-definition.yml) and paste it into the YAML editor. - - Select **Save** and verify that the environment and infrastructure definition are created successfully. +10. Check the `environment.yml` file and use the following `harness` CLI Command to create **Environments** in your Harness project: + + ``` + harness environment --file environment.yml apply + ``` + +11. In the environment you created above, add **Infrastructure Definition** using below `infrastructure-definition.yml`. Then, invoke the CLI command to create the entity: + + ``` + harness infrastructure --file infrastructure-definition.yml apply + ``` ### Services -8. In your Harness project, select **Services**. - - Select **New Service**. - - Enter the name `ownappservice`. - - Select **Save**, and then **YAML** (on the **Configuration** tab). 
- - Select **Edit YAML**, copy the contents of [service.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/deploy-own-app/cd-pipeline/service.yml), and paste it into the YAML editor. - - Select **Save**, and verify that the service **ownapp_service** is successfully created. +12. Verify the `service.yml` and invoke the following CLI command to create **Services** in your Harness Project. -### Pipeline + ``` + harness service -file service.yml apply + ``` - - +### Pick Your Deployment Strategy -9. In **Default Project**, select **Pipelines**. - - Select **New Pipeline**. - - Enter the name `ownapp_canary_pipeline`. - - Select **Inline** to store the pipeline in Harness. - - Select **Start** and, in the Pipeline Studio, toggle to **YAML** to use the YAML editor. - - Select **Edit YAML** to enable edit mode, and choose any of the following execution strategies. Paste the respective YAML based on your selection. - - Copy the contents of [canary-pipeline.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/guestbook/harnesscd-pipeline/canary-pipeline.yml). - - In your Harness pipeline YAML editor, paste the YAML. - - Select **Save**. + + + +13. CLI Command for canary deployment: + ``` + harness pipeline --file canary-pipeline.yml apply + ``` + You can switch to the **Visual** editor and confirm the pipeline stage and execution steps. - + -9. In **Default Project**, select **Pipelines**. - - Select **New Pipeline**. - - Enter the name `ownapp_bluegreen_pipeline`. - - Select **Inline** to store the pipeline in Harness. - - Select **Start** and, in the Pipeline Studio, toggle to **YAML** to use the YAML editor. - - Select **Edit YAML** to enable edit mode, and choose any of the following execution strategies. Paste the respective YAML based on your selection. - - Copy the contents of [bluegreen-pipeline.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/guestbook/harnesscd-pipeline/bluegreen-pipeline.yml). - - In your Harness pipeline YAML editor, paste the YAML. - - Select **Save**. +13. CLI Command for blue-green deployment: + ``` + harness pipeline --file bluegreen-pipeline.yml apply + ``` + You can switch to the **Visual** pipeline editor and confirm the pipeline stage and execution steps. - + -9. In **Default Project**, select **Pipelines**. - - Select **New Pipeline**. - - Enter the name `ownapp_rolling_pipeline`. - - Select **Inline** to store the pipeline in Harness. - - Select **Start** and, in the Pipeline Studio, toggle to **YAML** to use the YAML editor. - - Select **Edit YAML** to enable edit mode, and choose any of the following execution strategies. Paste the respective YAML based on your selection. - - Copy the contents of [rolling-pipeline.yml](https://github.com/harness-community/harnesscd-example-apps/blob/master/guestbook/harnesscd-pipeline/rolling-pipeline.yml). - - In your Harness pipeline YAML editor, paste the YAML. - - Select **Save**. +13. CLI Command for Rolling deployment: + ``` + harness pipeline --file rolling-pipeline.yml apply + ``` + You can switch to the **Visual** pipeline editor and confirm the pipeline stage and execution steps. - + -## Before you begin \{#before-you-begin-cli} +### Manually execute deployment pipelines -:::info +Finally, it's time to execute your pipeline. Every exection of a CD pipeline leads to a deployment. -If you have already followed the steps from the Kubernetes tutorials and met all the requirements, then you can skip this. 
+- Select **Run**, and then select **Run Pipeline** to initiate the deployment. -::: + - Observe the execution logs as Harness deploys the workload and checks for steady state. + - After a successful execution, you can check the deployment on your Kubernetes cluster using the following command: -Verify the following: + ```bash + kubectl get pods -n harness-delegate-ng + ``` -1. **Obtain Harness API Token**. For steps, go to the Harness documentation on [creating a personal API token](/docs/platform/automation/api/add-and-manage-api-keys/). -2. **Obtain GitHub personal access token with repo permissions**. For steps, go to the GitHub documentation on [creating a personal access token](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line). -3. **A Kubernetes cluster**. Use your own Kubernetes cluster or we recommend using [K3D](https://k3d.io/v5.5.1/) for installing Harness Delegates and deploying a sample application in a local development environment. - - Check [delegate system and network requirements](/docs/platform/delegates/delegate-concepts/delegate-requirements). -4. **Install the [Helm CLI](https://helm.sh/docs/intro/install/)**. -5. **Fork the [harnesscd-example-apps](https://github.com/harness-community/harnesscd-example-apps/fork)** repository using the GitHub web interface to utilize the Harness resource YAMLs. +### Automate deployments -## Getting Started with Harness CD +#### Using Triggers -1. Download and Configure Harness CLI. +With [Pipeline Triggers](/docs/category/triggers), you can start automating your deployments based on events happening in an external system. This system could be a Source Repository, an Artifact Repository, or a third party system. Any Developer with Pipeline Create and Edit permissions can configure a trigger in Harness. - - - - +Follow the [Pipeline Triggers tutorial](/docs/platform/triggers/tutorial-cd-trigger) to see triggers in action. + +#### Using API + +You can also utilize the [Harness API](/docs/category/api) to manage resources, view, create/edit, or delete them. + +Refer to the [Get started with Harness API](/docs/platform/automation/api/api-quickstart) guide to learn how to use the API for automation. + +### Congratulations!🎉 + +You've just learned how to use Harness CD to deploy your own application. - - - - - + + +## Before you begin \{#before-you-begin-gitops} + +Verify that you have the following: + +1. **A Kubernetes cluster**. We recommend [K3D](https://k3d.io/v5.5.1/) for installing the Harness GitOps Agent and deploying a sample application in a local development environment. + - For requirements, go to [Harness GitOps Agent Requirements](/docs/continuous-delivery/gitops/connect-and-manage/install-a-harness-git-ops-agent#requirements). +2. **Fork the [harnesscd-example-apps](https://github.com/harness-community/harnesscd-example-apps/fork)** repository using the GitHub web interface to utilize the Harness resource YAMLs. + +## Getting Started with Harness GitOps + + + + +1. Login to [Harness](https://app.harness.io/). +2. Select **Projects**, and then select **Default Project**. +3. Select **Deployments**, and then select **GitOps**. + +### GitOps Agent + +1. You have the option to use the same agent that you deployed during the Manifest tutorial or to deploy a new agent by following the steps below. However, remember to use a newly created agent identifier when creating repositories and clusters. + - Select **Settings**, and then select **GitOps Agents**. 
+ - Select **New GitOps Agent**. + - When are prompted with **Do you have any existing Argo CD instances?**, select **Yes** if you already have a Argo CD Instance, or else choose **No** to install the **Harness GitOps Agent**. + + + + +- Select **No**, and then select **Start**. +- In **Name**, enter the name for the new Agent `ownappagent` +- In **Namespace**, enter the namespace where you want to install the Harness GitOps Agent. Typically, this is the target namespace for your deployment. + - For this tutorial, let's use the `default` namespace to install the Agent and deploy applications. +- Select **Continue**. The **Review YAML** settings appear. +- This is the manifest YAML for the Harness GitOps Agent. You will download this YAML file and run it in your Harness GitOps Agent cluster. + + ``` + kubectl apply -f gitops-agent.yml -n default + ``` + +- Select **Continue** and verify the Agent is successfully installed and can connect to Harness Manager. - + - +- Select **Yes**, and then select **Start**. +- In **Name**, enter the name for the existing Argo CD project. +- In **Namespace**, enter the namespace where you want to install the Harness GitOps Agent. Typically, this is the target namespace for your deployment. +- Select **Next**. The **Review YAML** settings appear. +- This is the manifest YAML for the Harness GitOps Agent. You will download this YAML file and run it in your Harness GitOps Agent cluster. + + ```yaml + kubectl apply -f gitops-agent.yml -n default + ``` + +- Once you have installed the Agent, Harness will start importing all the entities from the existing Argo CD Project. +### Repositories + +1. Select **Settings**, and then select **Repositories**. + - Select **New Repository**. + - Choose **Git**. + - Enter a name in **Repository**: `ownapp_repo`. + - In **GitOps Agent**, select the Agent that you installed in your cluster and select **Apply**. + - In **Git Repository URL**, paste `https://github.com/microservices-demo/microservices-demo`. + - Select **Continue** and choose **Specify Credentials For Repository**. + - Select **HTTPS** as the **Connection Type**. + - Select **Anonymous (no credentials required)** as the **Authentication** method. + - Select **Save & Continue** and wait for Harness to verify the connection. + - Finally, select **Finish**. + +### Clusters + +1. Select **Settings**, and then select **Clusters**. + - Select **New Cluster**. + - In **Name**, enter a name for the cluster: `ownnapp_cluster`. + - In **GitOps Agent**, select the Agent you installed in your cluster, and then select **Apply**. + - Select **Continue** and select **Use the credentials of a specific Harness GitOps Agent**. + - Select **Save & Continue** and wait for the Harness to verify the connection. + - Finally, select **Finish**. + +### Applications + +1. Select **Applications**. + + - Select **New Application**. + + - Enter the **Application Name**: `sockshop`. + - In **GitOps Agent**, select the Agent that you installed in your cluster and select **Apply**. + - Select **New Service**, and then toggle to **YAML** to use the YAML editor. + - Select **Edit YAML**, paste in the YAML below, and then select **Save**. + + ```yaml + service: + name: ownapp_service + identifier: ownappservice + serviceDefinition: + type: Kubernetes + spec: {} + gitOpsEnabled: true + ``` + + - Select **New Environment**, and the toggle to **YAML** to use the YAML editor. + - Select **Edit YAML**, paste in the YAML below, and then select **Save**. 
+ + ```yaml + environment: + name: ownapp_env + identifier: ownappenv + description: "" + tags: {} + type: PreProduction + orgIdentifier: default + projectIdentifier: default_project + variables: [] + ``` + + - Next, select **Continue**, keep the **Sync Policy** settings as is, and select **Continue**. + - In **Repository URL**, select the **Repository** you created earlier, and then select **Apply**. + - Select **master** as the **Target Revision**, type `deploy/kubernetes` in the **Path**, and then select **Enter**. + - Select **Continue** and select the **Cluster** created in the above steps. + - In **Namespace**, enter the target namespace for Harness GitOps to sync the application. + - Enter `default` and select **Finish**. + +2. Finally, it's time to **Synchronize** the GitOps Application state. Select **Sync**, check the Application details, and then select **Synchronize** to initiate the deployment. + + - After a successful execution, you can check the deployment on your Kubernetes cluster using the following command: + + ```bash + kubectl get pods -n sock-shop + ``` + + - Sock Shop is accessible via the master and any of the node urls on port `30001`. + +A successful Application sync will display the following status tree under **Resource View**. + - + - a. Open Windows Powershell and run the command below to download the Harness CLI. +Harness offers a [Terraform Provider](https://registry.terraform.io/providers/harness/harness/latest/docs) to help you declaratively manage Harness GitOps entities alongside your application and cluster resources. These steps walk through using Terraform to create and install the GitOps agent, define related Harness entities, and deploy a sample application to your cluster. - + - b. Extract the downloaded zip file and change directory to extracted file location. +Before proceeding: + +1. Generate a [Harness API token](/docs/platform/automation/api/add-and-manage-api-keys/#create-personal-api-keys-and-tokens). +1. Make sure [Terraform](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli) is installed on a computer that can connect to your cluster. + +### Harness Terraform Provider + +1. Clone or download the Harness [gitops-terraform-onboarding](https://github.com/harness-community/gitops-terraform-onboarding) project. + +``` +git clone https://github.com/harness-community/harnesscd-example-apps.git +cd deploy-own-app/gitops/terraform +``` + +2. Initialize the Terraform configuration. This step will also install the Harness provider plugin. + +```bash +terraform init +``` + +
+What is a Terraform Provider?

A Terraform Provider is a plugin that allows Terraform to define and manage resources using a particular software API. In this tutorial, these resources will be Harness entities.

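
As a quick check once `terraform init` has finished, you can ask Terraform to list the providers required by this configuration. This is optional; the Harness provider should appear in the output after a successful init:

```bash
terraform providers
```
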
+

### Input variables

1. Open **terraform.tfvars**. This file contains example values for the Harness entities that will be created.

```file
project_id = "default_project"
org_id = "default"
agent_identifier = "ownappagent"
agent_name = "ownappagent"
agent_namespace = "default"
repo_identifier = "ownapprepo"
repo_name = "ownapprepo"
repo_url = "https://github.com/microservices-demo/microservices-demo/"
cluster_identifier = "ownappcluster"
cluster_name = "ownappcluster"
env_name = "ownappenv"
service_name = "ownappservice"
```

2. In **terraform.tfvars**, keep **repo_url** set to the sample _https://github.com/microservices-demo/microservices-demo/_ repository, or change it to your own app repo.

   - You are welcome to keep the other variable values as they are or rename them to suit your environment.

3. Set **account_id** and **harness_api_token** as Terraform environment variables. Your Account ID appears in the URL after `account/` when you are logged into app.harness.io. Note that Terraform only reads environment variables that carry the `TF_VAR_` prefix.

```
export TF_VAR_account_id="123abcXXXXXXXX"
export TF_VAR_harness_api_token="pat.abc123xxxxxxxxxx…"
```

:::warning

Never store your Harness API key in a plain-text configuration file or in version control. Use an environment variable or a dedicated secrets manager.

:::

### Terraform module

+What is a Terraform module?

A Terraform module is a collection of files that define the desired state to be enforced by Terraform. These files normally have the `.tf` extension.

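
If you want to see the module's files, list the `.tf` files in the directory you cloned. The listing below is illustrative; `agent.tf` is the file examined in the next step:

```bash
ls *.tf
```
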
-
-
+1. Open **agent.tf**. This file defines the GitOps agent in Harness and then deploys the agent manifest to your cluster. The agent is created using the harness_gitops_platform_agent resource. -2. Clone the forked **harnesscd-example-apps** repo and change directory. +```json +resource "harness_platform_gitops_agent" "gitops_agent" { + identifier = var.agent_identifier + account_id = var.account_id + project_id = var.project_id + org_id = var.org_id + name = var.agent_name + type = "MANAGED_ARGO_PROVIDER" + metadata { + namespace = var.agent_namespace + high_availability = false + } +} +``` - ```bash - git clone https://github.com/GITHUB_ACCOUNTNAME/harnesscd-example-apps.git - cd harnesscd-example-apps/deploy-own-app/cd-pipeline - ``` +If you have an _existing_ Argo CD instance, change the type argument to CONNECTED_ARGO_PROVIDER. Otherwise leave as is. - :::note +2. If you’ve made changes to any configuration files, verify the syntax is still valid. - Replace `GITHUB_ACCOUNTNAME` with your GitHub Account name. +```bash +terraform validate +``` - ::: +3. Preview the changes Terraform will make in Harness and your cluster. -3. Log in to Harness from the CLI. +```bash +terraform plan +``` - ```bash - harness login --api-key HARNESS_API_TOKEN --account-id HARNESS_ACCOUNT_ID - ``` +4. Apply the Terraform configuration to create the Harness and cluster resources. Type **yes** to confirm when prompted. - :::note +```bash +terraform apply +``` - Replace `HARNESS_API_TOKEN` with Harness API Token that you obtained during the prerequisite section of this tutorial, and HARNESS_ACCOUNT_ID with your Harness account ID (find in the URL when logged into https://app.harness.io). +Observe the output of `terraform apply` as your resources are created. It may take a few minutes for all the resources to be provisioned. - ::: +### Verify GitOps deployment -### Delegate +1. Log into [https://app.harness.io](https://app.harness.io). Select **Deployments**, then **GitOps**. -4. You have the option to use the same delegate that you deployed during the Manifest tutorial or to deploy a new delegate by following the steps below. However, remember to use a newly created delegate identifier when creating connectors. + - Select **Settings**, and then select **GitOps Agents** + - Verify your GitOps agent is listed and displays a HEALTHY health status. -- Log in to the [Harness UI](https://app.harness.io/). In **Project Setup**, select **Delegates**. +2. Navigate back to **Settings**, and then select **Repositories**. - - Select **Delegates**. + - Verify your **harnesscd-example-apps** repo is listed with Active connectivity status. - - Select **Install delegate**. For this tutorial, let's explore how to install the delegate using Helm. - - Add the Harness Helm chart repo to your local Helm registry. +3. Navigate back to **Settings**, and then select **Clusters**. - ```bash - helm repo add harness-delegate https://app.harness.io/storage/harness-download/delegate-helm-chart/ - ``` + - Verify you cluster with its associated GitOps agent is listed with Active connectivity status. - ```bash - helm repo update harness-delegate - ``` +4. Select **Application** from the top right of the page. - - In the command provided, `ACCOUNT_ID`, `MANAGER_ENDPOINT`, and `DELEGATE_TOKEN` are auto-populated values that you can obtain from the delegate Installation wizard. + - Click into the **sockshop** application. This is the application deployed from the **microservices-demo/microservices-demo/** repo. 
+ - Select **Resource View** to see the cluster resources that have been deployed. A successful Application sync will display the following status tree. - ```bash - helm upgrade -i helm-delegate --namespace harness-delegate-ng --create-namespace \ - harness-delegate/harness-delegate-ng \ - --set delegateName=helm-delegate \ - --set accountId=ACCOUNT_ID \ - --set managerEndpoint=MANAGER_ENDPOINT \ - --set delegateDockerImage=harness/delegate:23.03.78904 \ - --set replicas=1 --set upgrader.enabled=false \ - --set delegateToken=DELEGATE_TOKEN - ``` +5. Return to a local command line. Confirm you can see the GitOps agent and guestbook application resources in your cluster. - - Verify that the delegate is installed successfully and can connect to the Harness Manager. - - You can also follow the [Install Harness Delegate on Kubernetes or Docker](/docs/platform/get-started/tutorials/install-delegate) steps to install the delegate using the Terraform Helm Provider or Kubernetes manifest. +``` +kubectl get deployment -n default +kubectl get svc -n default +kubectl get pods -n default +``` -:::warning +6. To access the Sockshop application deployed via the Harness GitOps, you can check the deployment on your Kubernetes cluster using the following command: + `bash + kubectl get pods -n sock-shop + ` + - Sock Shop is accessible via the master and any of the node urls on port `30001`. -If you plan to use your own Project, Organization, and custom names for Harness resources, please update the resource YAMLs accordingly with these details. +### Cleaning up -::: +1. If you know longer need the resources created in this tutorial, run the following command to delete the GitOps agent and associated Harness entities. -#### Secrets +``` +terraform destroy +``` -5. If you intend to use a private Git repository that hosts your manifest files, create a Harness secret containing the Git personal access token (PAT) with name `ownappgitpat`. Subsequently, create a new Git connector using this secret. - - Under **Project Setup**, select **Secrets**. - - Select **New Secret**, and then select **Text**. - - Enter the secret name `ownappgitpat`. - - For the secret value, paste the GitHub personal access token you saved earlier. - - Select **Save**. +**Note:** Since deleting the Sockshop application in Harness does not delete the deployed cluster resources themselves, you’ll need to manually remove the Kubernetes deployment. -#### Connectors +
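
For example, assuming the sample application's resources live in the `sock-shop` namespace created during the sync, you could remove them in one step with:

```bash
kubectl delete namespace sock-shop
```
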
+ -6. Replace **url** with your GitHub Repo URL that hosts your manifest files in the `github-connector.yaml`. +1. Refer [Install and Configure Harness CLI](https://developer.harness.io/docs/platform/automation/cli/install) doc to setup and configure Harness CLI. -7. Verify that the `projectIdentifier` and `orgIdentifier` is correct and the `tokenRef` value matches the Git PAT secret you created in the previous step. +2. Clone the Forked **harnesscd-example-apps** repo and change directory. -8. Now create the **GitHub connector** using the following CLI command: - ``` - harness connector --file github-connector.yml apply --git-user + ```bash + git clone https://github.com/GITHUB_ACCOUNTNAME/harnesscd-example-apps.git + cd harnesscd-example-apps/deploy-own-app/gitops ``` -9. Utilize the same delegate that you deployed as part of the Manifest tutorial or use the newly created delegate identifier to create the **Kubernetes connector** using the following CLI command: - ``` - harness connector --file kubernetes-connector.yml apply --delegate-name DELEGATE_IDENTIFIER - ``` + :::note -### Environment + Replace `GITHUB_ACCOUNTNAME` with your GitHub Account name. -10. Check the `environment.yml` file and use the following `harness` CLI Command to create **Environments** in your Harness project: + ::: - ``` - harness environment --file environment.yml apply - ``` +3. You have the option to use the same agent that you deployed during the Manifest tutorial or to deploy a new agent by following the steps below. However, remember to use a newly created agent identifier when creating repositories and clusters. + - Select **Settings**, and then select **GitOps Agents**. + - Select **New GitOps Agent**. + - When you are prompted with **Do you have any existing Argo CD instances?**, select **Yes** if you already have a Argo CD Instance, or else choose **No** to install the **Harness GitOps Agent**. -11. In the environment you created above, add **Infrastructure Definition** using below `infrastructure-definition.yml`. Then, invoke the CLI command to create the entity: + + - ``` - harness infrastructure --file infrastructure-definition.yml apply - ``` +- Select **No**, and then select **Start**. +- In **Name**, enter the name for the new Agent `ownappagent` +- In **Namespace**, enter the namespace where you want to install the Harness GitOps Agent. Typically, this is the target namespace for your deployment. + - For this tutorial, let's use the `default` namespace to install the Agent and deploy applications. +- Select **Continue**. The **Review YAML** settings appear. +- This is the manifest YAML for the Harness GitOps Agent. You will download this YAML file and run it in your Harness GitOps Agent cluster. -### Services + ``` + kubectl apply -f gitops-agent.yml -n default + ``` -12. Verify the `service.yml` and invoke the following CLI command to create **Services** in your Harness Project. +- Select **Continue** and verify the Agent is successfully installed and can connect to Harness Manager. - ``` - harness service -file service.yml apply - ``` + + -### Pick Your Deployment Strategy +- Select **Yes**, and then select **Start**. +- In **Name**, enter the name for the existing Argo CD project. +- In **Namespace**, enter the namespace where you want to install the Harness GitOps Agent. Typically, this is the target namespace for your deployment. +- Select **Next**. The **Review YAML** settings appear. +- This is the manifest YAML for the Harness GitOps Agent. 
You will download this YAML file and run it in your Harness GitOps Agent cluster. - - + ```yaml + kubectl apply -f gitops-agent.yml -n default + ``` -13. CLI Command for canary deployment: - ``` - harness pipeline --file canary-pipeline.yml apply - ``` - You can switch to the **Visual** editor and confirm the pipeline stage and execution steps. +- Once you have installed the Agent, Harness will start importing all the entities from the existing Argo CD Project. - + -13. CLI Command for blue-green deployment: - ``` - harness pipeline --file bluegreen-pipeline.yml apply - ``` - You can switch to the **Visual** pipeline editor and confirm the pipeline stage and execution steps. +4. Before proceeding, store the Agent Identifier value as an environment variable for use in the subsequent commands: - - + ```bash + export AGENT_NAME=GITOPS_AGENT_IDENTIFIER + ``` -13. CLI Command for Rolling deployment: - ``` - harness pipeline --file rolling-pipeline.yml apply - ``` - You can switch to the **Visual** pipeline editor and confirm the pipeline stage and execution steps. + > Note: Replace `GITOPS_AGENT_IDENTIFIER` with GitOps Agent Identifier. - - +5. Create a **GitOps Repository**. - -
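
If you are unsure of your node addresses, you can list them and then browse to the service on its NodePort. The `sock-shop` namespace and port `30001` come from the sample manifests:

```bash
kubectl get nodes -o wide
# then open http://<node-ip>:30001 in a browser
```
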
+ ```bash + harness gitops-repository --file deploy-own-app/gitops/repository.yml apply --agent-identifier $AGENT_NAME + ``` -### Manually execute deployment pipelines + > If you intend to use a private Git repository that hosts your manifest files, create a Harness secret containing the Git personal access token (PAT). Subsequently, create a new GitOps Repository pointing to your private repo. -Finally, it's time to execute your pipeline. Every exection of a CD pipeline leads to a deployment. +6. Create a **GitOps Cluster**. -- Select **Run**, and then select **Run Pipeline** to initiate the deployment. + ```bash + harness gitops-cluster --file deploy-own-app/gitops/cluster.yml apply --agent-identifier $AGENT_NAME + ``` - - Observe the execution logs as Harness deploys the workload and checks for steady state. - - After a successful execution, you can check the deployment on your Kubernetes cluster using the following command: +7. Create a **GitOps Application**. - ```bash - kubectl get pods -n sock-shop - ``` + ```bash + harness gitops-application --file deploy-own-app/gitops/application.yml apply --agent-identifier $AGENT_NAME + ``` - - Sock Shop is accessible via the master and any of the node urls on port `30001`. + > To deploy your own app, modify `repoURL` and `path` in the application.yml. -### Automate deployments +8. At last, it's time to synchronize the application with your Kubernetes setup. -#### Using Triggers +- Navigate to Harness UI > Default Project > GitOps > Applications, then click on gitops-application. Choose Sync, followed by Synchronize to kick off the application deployment. -With [Pipeline Triggers](/docs/category/triggers), you can start automating your deployments based on events happening in an external system. This system could be a Source Repository, an Artifact Repository, or a third party system. Any Developer with Pipeline Create and Edit permissions can configure a trigger in Harness. + - Observe the Sync state as Harness synchronizes the workload under `Resource View` tab. -Follow the [Pipeline Triggers tutorial](/docs/platform/triggers/tutorial-cd-trigger) to see triggers in action. + - After a successful execution, you can check the deployment on your Kubernetes cluster using the following command: -#### Using API + ```bash + kubectl get pods -n sock-shop + ``` -You can also utilize the [Harness API](/docs/category/api) to manage resources, view, create/edit, or delete them. + - Sock Shop is accessible via the master and any of the node urls on port `30001`. -Refer to the [Get started with Harness API](/docs/platform/automation/api/api-quickstart) guide to learn how to use the API for automation. +
+
### Congratulations!🎉 -You've just learned how to use Harness CD to deploy your own application. +You've just learned how to use **Harness GitOps** to deploy an application using a Kubernetes manifest. + +Keep learning about Harness GitOps. Create a GitOps ApplicationSet and PR Pipeline in Harness GitOps by following this [guide](/docs/continuous-delivery/gitops/applicationsets/harness-git-ops-application-set-tutorial).
+
diff --git a/docs/continuous-delivery/get-started/services-and-environments-overview.md b/docs/continuous-delivery/get-started/services-and-environments-overview.md
index 9a4b436af40..9e333570261 100644
--- a/docs/continuous-delivery/get-started/services-and-environments-overview.md
+++ b/docs/continuous-delivery/get-started/services-and-environments-overview.md
@@ -581,6 +581,10 @@ When you select an environment in a stage, you can select the **Infrastructure D
 
 ![](./static/services-and-environments-overview-14.png)
 
+:::info note
+During pipeline execution, all infrastructure definitions are displayed, regardless of whether they are scoped to the selected service. Users are advised to manually ensure that only the appropriate infrastructure definitions are chosen for their services. This limitation is more prominent when services or environments are dynamically expressed, as scoping may not apply consistently.
+:::
+
 #### Infrastructure Tags
 
 Tags can be attached to infrastructure definitions representing their characteristics. These tags can be key value pairs.
diff --git a/docs/continuous-delivery/gitops/connect-and-manage/install-a-harness-git-ops-agent.md b/docs/continuous-delivery/gitops/connect-and-manage/install-a-harness-git-ops-agent.md
index c02acd704b5..d70cc5b87ee 100644
--- a/docs/continuous-delivery/gitops/connect-and-manage/install-a-harness-git-ops-agent.md
+++ b/docs/continuous-delivery/gitops/connect-and-manage/install-a-harness-git-ops-agent.md
@@ -536,7 +536,9 @@ Here are some answers to commonly asked GitOps Agent questions.
 
 | GitOps Agent version | Packaged Argo CD version | Supported Argo CD versions                    | Redis version       |
 | -------------------- | ------------------------ | --------------------------------------------- | ------------------- |
-| 0.78.0               | v2.10.14                 | v2.8.2, 2.9.0, 2.9.3, 2.9.4, 2.10.10, 2.10.14 | redis:7.2.4-alpine  |
+| 0.83.0               | v2.10.14                 | v2.8.2, 2.9.0, 2.9.3, 2.9.4, 2.10.10, 2.10.14 | redis:7.2.4-alpine  |
+| 0.82.0               | RELEASE BURNED           | RELEASE BURNED                                | RELEASE BURNED      |
+| 0.78.0 - 0.81.0      | v2.10.14                 | v2.8.2, 2.9.0, 2.9.3, 2.9.4, 2.10.10, 2.10.14 | redis:7.2.4-alpine  |
 | 0.77.0               | v2.10.10                 | v2.8.2, 2.9.0, 2.9.3, 2.9.4, 2.10.10          | redis:7.2.4-alpine  |
 | 0.67.0 - 0.76.0      | v2.9.4                   | v2.7.2, v.2.7.8, v2.8.2, 2.9.0, 2.9.3, 2.9.4  | redis:7.0.11-alpine |
 | 0.66.0               | v2.9.3                   | v2.7.2, v.2.7.8, v2.8.2, 2.9.0, 2.9.3         | redis:7.0.11-alpine |
@@ -545,6 +547,10 @@ Here are some answers to commonly asked GitOps Agent questions.
 
 The supported Argo CD versions listed above are what Harness has verified. However, any patch version should work for each minor version listed. For example, the GitOps agent version `0.78.0` should support the Argo CD version of `2.10.0`, but this is not verified by Harness.
 
+:::warning
+The GitOps Agent version 0.82.0 was published to Docker, but the release has been burned due to backward compatibility issues. Please skip that release.
+:::
+
 ### How long is a GitOps Agent version supported?
 
 Harness supports GitOps Agent versions that support Argo CD versions n to n-2 minor versions (e.g., 2.5.4, 2.4.4, 2.3.4).
diff --git a/docs/continuous-delivery/gitops/pr-pipelines/gitops-pipeline-steps.md b/docs/continuous-delivery/gitops/pr-pipelines/gitops-pipeline-steps.md
index e93bf41f458..588bc9940f3 100644
--- a/docs/continuous-delivery/gitops/pr-pipelines/gitops-pipeline-steps.md
+++ b/docs/continuous-delivery/gitops/pr-pipelines/gitops-pipeline-steps.md
@@ -185,6 +185,18 @@ If a parameter is specified both in the values file and as a parameter or file p
 
 Once your GitOps application is updated, you can use the GitOps Sync step to deploy your changes.
 
+#### Update GitOps App step for multi-source applications
+
+:::note
+
+Currently, support for multi-source applications is behind the feature flag `GITOPS_MULTI_SOURCE_ENABLED`. Please contact Harness support to enable this feature.
+
+:::
+
+With this feature enabled, you can select your multi-source application in the **Application** field. This will populate the step with all the sources for the selected application.
+
+From there, you can update each source individually, just as you would for a single-source application, as described above.
+
 ### GitOps Sync step
 
 This step triggers a sync for your existing or updated GitOps application.
diff --git a/docs/continuous-delivery/gitops/use-gitops/add-a-harness-git-ops-repository.md b/docs/continuous-delivery/gitops/use-gitops/add-a-harness-git-ops-repository.md
index 6005d351edc..8ae74a9a5d2 100644
--- a/docs/continuous-delivery/gitops/use-gitops/add-a-harness-git-ops-repository.md
+++ b/docs/continuous-delivery/gitops/use-gitops/add-a-harness-git-ops-repository.md
@@ -71,7 +71,7 @@ import TabItem from '@theme/TabItem';
   - If you use Two-Factor Authentication for your Git repo, you connect over **HTTPS** or **SSH**.
   - For **SSH**, ensure that the key is not OpenSSH, but rather PEM format. To generate an SSHv2 key, use: `ssh-keygen -t rsa -m PEM` The `rsa` and `-m PEM` ensure the algorithm and that the key is PEM. Next, follow the prompts to create the PEM key.
   - For more information, see the [ssh-keygen man page](https://linux.die.net/man/1/ssh-keygen).
-  - **HTTP** also has the **Anonymous** option.
+  - **HTTP** also has the **Anonymous** option. When a repository is configured with an anonymous connection type (public - no credentials), Harness automatically uses the repository credentials if they are available, even if you do not explicitly select them in the UI.
  - For steps on setting up the GitHub App, go to [Use a GitHub App in a GitHub Connector](/docs/platform/connectors/code-repositories/git-hub-app-support).
  - Select **Save & Continue**. Harness validates the connection.
* **Use a Credentials Template**
diff --git a/docs/continuous-delivery/manage-deployments/rollback-deployments.md b/docs/continuous-delivery/manage-deployments/rollback-deployments.md
index 0b3d2e1f390..6e82240132f 100644
--- a/docs/continuous-delivery/manage-deployments/rollback-deployments.md
+++ b/docs/continuous-delivery/manage-deployments/rollback-deployments.md
@@ -111,4 +111,8 @@ To perform a Post-Deployment Rollback, the user must have the following permissi
 1. Pipeline **Execute** permission for the specific pipeline.
 2. Environment **Rollback** permission.
 
-A user will only be allowed to execute rollbacks on any instance if they possess these two permissions for the pipeline and environment through which the deployment occurred. Otherwise, they will see the following message when attempting to click Rollback.
\ No newline at end of file
+A user will only be allowed to execute rollbacks on any instance if they possess these two permissions for the pipeline and environment through which the deployment occurred. Otherwise, they will see the following message when attempting to click Rollback.
+
+## Rollback Using API
+
+You can roll back deployments programmatically using Harness APIs. For more information, go to [Harness API Documentation](https://apidocs.harness.io/tag/Rollback).
\ No newline at end of file
diff --git a/docs/continuous-delivery/toolchain-policy.md b/docs/continuous-delivery/toolchain-policy.md
index 63ecbc10050..b44c3e5e5e5 100644
--- a/docs/continuous-delivery/toolchain-policy.md
+++ b/docs/continuous-delivery/toolchain-policy.md
@@ -21,17 +21,17 @@ Harness will test integration updates against the corresponding and impacted dep
 
 Below is a table that shows the current state of our integrations, demonstrating our commitment to updating them to the latest versions:
 
-| Connector   | Integration          | Supported Version | Latest Version | Upgrade Cycle |
+| Connector   | Integration          | Supported Version | Latest Version | Upgrade Cycle |
 |-------------|----------------------|-------------------|----------------|---------------|
 | Kubernetes  | Kubernetes           | 1.27.4            | 1.30.3         | Quarterly     |
 | Helm        | Helm                 | 3.8.x             | 3.14.x         | Quarterly     |
 | Tanzu       | Tanzu                | cf cli v7         | cf cli v8      | Quarterly     |
 | Terraform   | NaN                  | 1.3.5             | 1.9.4          | Quarterly     |
 | Terragrunt  | NaN                  | 0.40.x            | 0.66.4         | Quarterly     |
-| NaN         | Serverless.com       | 3.x               | 4.x            | Quarterly     |
+| Serverless  | [Serverless.com plugin](https://hub.docker.com/r/harness/serverless-plugin/tags) | 3.x | 4.x | Quarterly |
 | Jenkins     | Jenkins Build Step   | 2.440             | 2.471          | Quarterly     |
 | NaN         | ArgoCD               | 2.8.x             | 2.12.x         | Quarterly     |
-| AWS         | AWS SAM              | 1.84.0            | 1.121.0        | Quarterly     |
+| AWS         | [AWS SAM Plugin](https://hub.docker.com/r/harness/aws-sam-plugin/tags) | 1.84.0 | 1.121.0 | Quarterly |
 
 ### Kubernetes Maintenance FAQ
diff --git a/docs/continuous-delivery/verify/cv-getstarted/configure-first-cv.md b/docs/continuous-delivery/verify/cv-getstarted/configure-first-cv.md
index 0722cc077d7..b8d5d26e719 100644
--- a/docs/continuous-delivery/verify/cv-getstarted/configure-first-cv.md
+++ b/docs/continuous-delivery/verify/cv-getstarted/configure-first-cv.md
@@ -190,7 +190,7 @@ To set a fail-fast threshold, follow these steps:
 
 3. In the **Action** field, select what the CV should do when applying the rule. Let's select **Fail Immediately**.
 
-4. In the **Criteria** field, choose **Percentage Deviation**, and from the greater than select **1**. This sets a threshold for memory usage beyond which the system or application is considered to be in a problematic state, and action should be taken.
+4. In the **Criteria** field, choose **Percentage Deviation**, and from the greater than select **1**. This sets a threshold for memory usage beyond which the system or application is considered to be in a problematic state, and action should be taken. Please note that when using **Percentage Deviation**, you should observe results using [Normalized Views](https://developer.harness.io/docs/continuous-delivery/verify/cv-results/interpret-metric-results/#filter-metric-summary).
![Configure threshold](./static/cv-simple-cv-select-failfast-threshold.png) diff --git a/docs/continuous-delivery/verify/cv-results/interpret-metric-results.md b/docs/continuous-delivery/verify/cv-results/interpret-metric-results.md index c06e505014b..1ef71bcfa44 100644 --- a/docs/continuous-delivery/verify/cv-results/interpret-metric-results.md +++ b/docs/continuous-delivery/verify/cv-results/interpret-metric-results.md @@ -105,6 +105,8 @@ Any metric marked as "Throughput" is not analyzed during the verification proces You can customize the metrics summary view using the following filters: + + - **Group**: When configuring the health source, you have the flexibility to group a set of metrics together, such as a transaction group. Using the **Group** filter, you can filter the results based on these predefined metric groups. For health sources such as Appdynamics, New Relic, and Dynatrace, metrics are automatically grouped based on transactions, making all transactions available in the dropdown filter. - **Nodes**: Using this filter, you can narrow down the results by selecting a specific "Canary" or "After" node, depending on the type of deployment. @@ -113,9 +115,7 @@ You can customize the metrics summary view using the following filters: - **Raw** and **Normalized**: Use the **Normalized** filter option to view the metric data in a standardized form used by ML analysis. The system performs a normalization process for ML analysis by converting three adjacent minutes of metric data into a single data point. Use the **Raw** filter option to see the metric data exactly as provided by the source, without any modifications. - - - + ![Normalized Data](static/normalizeddata.png) ## Metric details graph diff --git a/docs/continuous-delivery/verify/cv-results/static/normalizeddata.png b/docs/continuous-delivery/verify/cv-results/static/normalizeddata.png new file mode 100644 index 00000000000..814e63bbc1c Binary files /dev/null and b/docs/continuous-delivery/verify/cv-results/static/normalizeddata.png differ diff --git a/docs/continuous-delivery/x-platform-cd-features/cd-steps/ticketing-systems/create-jira-issues-in-cd-stages.md b/docs/continuous-delivery/x-platform-cd-features/cd-steps/ticketing-systems/create-jira-issues-in-cd-stages.md index 0b1e06e2587..1987b2386b5 100644 --- a/docs/continuous-delivery/x-platform-cd-features/cd-steps/ticketing-systems/create-jira-issues-in-cd-stages.md +++ b/docs/continuous-delivery/x-platform-cd-features/cd-steps/ticketing-systems/create-jira-issues-in-cd-stages.md @@ -39,6 +39,14 @@ To add a Jira Create step, do the following: 8. Select **Apply Changes**. 9. Select **Save**. +The fields that are required are determined by your definitions that are set in the project and issue in Jira. + +:::info Fields and Security +- Fields for a particular Issue Type must be modifiable in order to be added and set via Harness' Jira connector. For example, if the Requester Field cannot be edited, or has permission restrictions, then customers can expect an error message when setting the field. For example, `Fields {Reporter} are invalid for the provided jira issue type` +- Customers looking to set Requesters and Assignees must make sure they take into account Atlassian's settings and permissions to allow Assignees or Reporters to be searchable. 
If all other fields can be set, but no users can be listed, [please refer to the following information from Atlassian](https://confluence.atlassian.com/jirakb/cannot-find-users-in-assignee-or-reporter-fields-779158856.html) +::: + + ## Add Issue fields In Jira fields, you can select specific fields within a Jira issue. For more information on custom fields, go to [Jira custom fields](https://support.atlassian.com/jira-cloud-administration/docs/custom-fields-types-in-company-managed-projects/). diff --git a/docs/continuous-delivery/x-platform-cd-features/environments/create-environment-groups.md b/docs/continuous-delivery/x-platform-cd-features/environments/create-environment-groups.md index 663fa9b4ac2..272116b7b97 100644 --- a/docs/continuous-delivery/x-platform-cd-features/environments/create-environment-groups.md +++ b/docs/continuous-delivery/x-platform-cd-features/environments/create-environment-groups.md @@ -6,8 +6,8 @@ sidebar_position: 3 Environment groups are simply a way to group environments so you can assign permissions to multiple environments in a role. -1. From your project, select **Environments**. -2. Select **Environment Groups** on the top of the page. +1. From your Project Settings, select **Environments**. +2. Select **Environment Groups** on the top-right corner of the page. 3. Select **New Environment Group**. ![create environment groups](./static/services-and-environments-overview-23.png) @@ -16,12 +16,16 @@ Environment groups are simply a way to group environments so you can assign perm :::note -You can only add environments in the same scope (Harness project, org, account) as the environment group. For example, if the environment group is configured at the project level, only environments in the project level can be configured for the group. +By default, you can only add environments in the same scope (Harness project, org, account) as the environment group. For example, if the environment group is configured at the project level, only environments in the project level can be configured for the group. + +You can also add environments created at the Account and Organization levels to the Environment Group. For more details, see [Cross Scope Environment Groups](/docs/continuous-delivery/x-platform-cd-features/environments/create-environment-groups#cross-scope-environment-groups). ::: ![environment groups](./static/environment-groups.png) +5. Select **Submit**. + Here is a sample environment group YAML. ``` @@ -37,8 +41,48 @@ environmentGroup: - dev_1679347042451 ``` -5. Select **Submit**. +## Cross Scope Environment Groups + +You can also add environments created at the Account and Organization levels to the **Environment Group**. + +:::note +Currently, the Cross Scope Environment Groups feature is behind the feature flag `CDS_CROSS_SCOPED_ENV_GROUPS`. Contact [Harness Support](mailto:support@harness.io) to enable the feature. +::: + +1. From your Project Settings, select **Environments**. +2. Select **Environment Groups** at the top-right corner of the page. +3. Select **New Environment Group**. +4. Enter a name for the environment group. All the environments created at the **Project**, **Organization**, and **Account** levels will be displayed under the respective tabs. Select the environments needed in your Environment Group and select **Submit**. + +![Cross Scope Environment Groups](./static/cross_scoped_env_groups.png) + +Once the environment group is created, you can see all the environments along with their scopes listed. 
+ +![Cross Scope Environment Groups](./static/cross_scoped_env_groups-2.png) + +Below is an example of the YAML for an environment group: + +```yaml +environmentGroup: + name: demoEnvGroup + identifier: demoEnvGroup + description: "" + tags: {} + orgIdentifier: default + projectIdentifier: CD_Docs + envIdentifiers: + - test + - testenv1 + - org.testE + - account.CDCNGAuto_EnvNg59wFkWCjQQ + - account.CDCNGAuto_EnvNgdCz0sFChIM +``` + +Under `envIdentifiers`: +- The environments associated with the **Project** level do not have a prefix. +- The environments associated with the **Organization** level are prefixed with `org.` +- The environments associated with the **Account** level are prefixed with `account.` ## Using environment groups @@ -59,7 +103,6 @@ You can reference which environment group was selected using an expression. I - How to handle infrastructures that have runtime input parameters when the expression is referenced? - How to deploy to a subset of those environments in the environment group? - How do we propagate the environment group as an expression to the following stage? - -- + diff --git a/docs/continuous-delivery/x-platform-cd-features/environments/static/cross_scoped_env_groups-2.png b/docs/continuous-delivery/x-platform-cd-features/environments/static/cross_scoped_env_groups-2.png new file mode 100644 index 00000000000..3dca5861538 Binary files /dev/null and b/docs/continuous-delivery/x-platform-cd-features/environments/static/cross_scoped_env_groups-2.png differ diff --git a/docs/continuous-delivery/x-platform-cd-features/environments/static/cross_scoped_env_groups.png b/docs/continuous-delivery/x-platform-cd-features/environments/static/cross_scoped_env_groups.png new file mode 100644 index 00000000000..3b7fd64148f Binary files /dev/null and b/docs/continuous-delivery/x-platform-cd-features/environments/static/cross_scoped_env_groups.png differ diff --git a/docs/continuous-delivery/x-platform-cd-features/executions/step-failure-strategy-settings.md b/docs/continuous-delivery/x-platform-cd-features/executions/step-failure-strategy-settings.md index aa8be5bf9ca..887d32f1ea0 100644 --- a/docs/continuous-delivery/x-platform-cd-features/executions/step-failure-strategy-settings.md +++ b/docs/continuous-delivery/x-platform-cd-features/executions/step-failure-strategy-settings.md @@ -45,6 +45,8 @@ For example, a failure strategy set on a step doesn't impact the failure strateg Both step and stage failure strategies include the **Rollback Stage** action option. There is no rollback step option. +When creating a step template, failure strategies such as **Rollback Pipeline** and **Rollback Stage** appear as options. These strategies only work if the stage where this step template is used is a deployment stage. + ## Failure strategy settings The following table lists the failure strategy actions and how they work at the step, step group, and stage levels. 
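+For illustration, here is a minimal sketch of a step-level failure strategy that triggers the **Rollback Stage** action in a deployment stage; the step name, identifier, and script are placeholders:
+
+```yaml
+- step:
+    type: ShellScript
+    name: Smoke Test
+    identifier: smoke_test
+    timeout: 10m
+    spec:
+      shell: Bash
+      onDelegate: true
+      source:
+        type: Inline
+        spec:
+          script: ./smoke-test.sh # placeholder verification script
+    failureStrategies:
+      - onFailure:
+          errors:
+            - AllErrors # apply the strategy to any error type
+          action:
+            type: StageRollback # roll back the deployment stage if this step fails
+```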
diff --git a/docs/continuous-delivery/x-platform-cd-features/services/artifact-sources.md b/docs/continuous-delivery/x-platform-cd-features/services/artifact-sources.md index a860037eb5d..73e0e37b157 100644 --- a/docs/continuous-delivery/x-platform-cd-features/services/artifact-sources.md +++ b/docs/continuous-delivery/x-platform-cd-features/services/artifact-sources.md @@ -54,6 +54,31 @@ When you hardcode the artifact in your manifests, any artifacts added to your Ha Configuring dependent fields, such as the artifact tag, as runtime inputs when the primary artifact is set as an expression is supported in YAML only and is not supported through the UI. ::: +### Skip Artifact Consumption for the Stage + +:::note +Currently, the disable artifact consumption check feature is behind the feature flag `CDS_ARTIFACT_DISABLE_VALIDATION`. Contact [Harness Support](mailto:support@harness.io) to enable the feature. +::: + +You can bypass artifact consumption checks for a service in a **Deploy** stage by selecting the **Disable artifact in this stage** checkbox. When this option is enabled, the pipeline treats the service as if no artifact is configured and skips the artifact consumption check. + +![](static/disable-artifact.png) + +This feature works for: +- **Primary Artifacts**: Applies to both single and multiple primary artifact services. If multiple primary artifacts are used, the checkbox applies to all primary artifacts. +- **Sidecar Artifacts**: The artifact consumption check is also skipped for sidecar artifacts when this checkbox is enabled. + +Additionally, this logic applies when the same service is propagated to subsequent stages: +- If you enable **Disable artifact in this stage** for a particular service in one stage and propagate that service to another stage, the artifact consumption check will also be skipped in the propagated stage. +- If the **Disable artifact in this stage** checkbox is enabled, and the artifact or artifact tag is configured as runtime inputs, the pipeline will not prompt for the artifact or artifact tag during execution. + + ![](static/disable-artifact-2.png) + +:::warning + +If the **Disable artifact in this stage** checkbox is enabled, any steps in the stage that require an artifact are likely to fail. Ensure that no steps in the stage depend on an artifact. +::: + ## Docker ### Use artifacts in any Docker registry @@ -3172,10 +3197,10 @@ The following table lists how many artifact versions Harness displays in its UI | Google Container Registry | No Limit | Lexical (descending) | | AWS ECR | No Limit | Lexical (descending) | | Azure Container Registry | 500 | Lexical (descending) | -| Google Artifact Registry(Docker) | 2,147,483,647 | Descending order of created at. | +| Google Artifact Registry(Docker) | 2,147,483,647 | Descending order of last modified | | Artifactory(Docker) | No Limit | Lexical (descending) | -| Artifactory(Generic) | 10000 | Descending order of created at. | -| Github Packages | No Limit | Descending order of created at. | +| Artifactory(Generic) | 10000 | Descending order of created | +| Github Packages | No Limit | Descending order of created | | Nexus3(Docker) | 50 | Descending order of last modified at (3.46.0 and newer). Alphabetically descending for older versions. | | Nexus3(non-Docker) | 2,147,483,647 | Descending order of last modified at (3.46.0 and newer). Alphabetically descending for older versions. 
| | Nexus2 | No Limit | Lexical (descending) | @@ -3369,4 +3394,10 @@ If the artifact image differs from the one used in the previous successful deplo **Connector Changes**: If the connector configuration has changed but the tag remains the same, an error will be thrown to ensure consistency and avoid conflicts. -Currently, the expression `<+lastSuccessfulDeployed.tag>` is limited to resolving primary artifact tags and does not support resolving sidecar artifact tags. \ No newline at end of file +Currently, the expression `<+lastSuccessfulDeployed.tag>` is limited to resolving primary artifact tags and does not support resolving sidecar artifact tags. + +### Latest Artifact Tag Sample + +To see an example of how to use Latest Artifact Tag in a Harness pipeline, visit the [Harness Community Repository](https://github.com/harness-community/harnesscd-example-apps/tree/master/cd-features/last-successful-artifact-tag). + +This repository provides a ready-to-use sample application and the necessary configuration files to help you get started quickly. \ No newline at end of file diff --git a/docs/continuous-delivery/x-platform-cd-features/services/cd-artifact-sources-faqs.md b/docs/continuous-delivery/x-platform-cd-features/services/cd-artifact-sources-faqs.md index e38fc3bcc9c..1f9fcb273d3 100644 --- a/docs/continuous-delivery/x-platform-cd-features/services/cd-artifact-sources-faqs.md +++ b/docs/continuous-delivery/x-platform-cd-features/services/cd-artifact-sources-faqs.md @@ -6,6 +6,16 @@ Harness supports all of the common repos. See [Connect to an artifact repo](/docs/platform/connectors/artifact-repositories/connect-to-an-artifact-repo). +### How is Last Successful Deployment Tag different from Post-Production Rollback? + +**Last Successful Deployment Tag** `<+lastSuccessfulDeployed.tag>` focuses on deploying the most recent artifact that was successfully deployed. + +**Post-Production Rollback**, on the other hand, is designed to revert the application to a previous stable version in production after a deployment issue is detected. It’s a recovery mechanism to restore a previously known good state. + +**Use cases** +- Use Last Successful Deployment when you want to deploy the same version that was last known to work without introducing a new artifact. It’s particularly useful for consistent deployments during iterative updates. +- Use Post-Production Rollback when a newly deployed artifact causes issues or breaks functionality, and you need to return the application to a stable, previously deployed state. + ### How do I list Github Tags for custom artifact when the curl returns a json array without any root element? We cannot provide an array directly to the custom artifact. It needs a root element to parse the JSON response. 
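+For illustration, here is a minimal sketch of one way to do this in the custom artifact script; it assumes `jq` is available where the script runs and that, as in other custom artifact examples, the script writes its output to `HARNESS_ARTIFACT_RESULT_PATH` (the repository path is a placeholder):
+
+```sh
+# Fetch the tag list (a bare JSON array) and wrap it under a "tags" root element
+# so the custom artifact source can parse it (for example, with an artifacts array
+# path of $.tags and a version path of name).
+curl -s https://api.github.com/repos/YOUR_ORG/YOUR_REPO/tags \
+  | jq '{ tags: . }' > $HARNESS_ARTIFACT_RESULT_PATH
+```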
diff --git a/docs/continuous-delivery/x-platform-cd-features/services/static/disable-artifact-2.png b/docs/continuous-delivery/x-platform-cd-features/services/static/disable-artifact-2.png new file mode 100644 index 00000000000..544f512812e Binary files /dev/null and b/docs/continuous-delivery/x-platform-cd-features/services/static/disable-artifact-2.png differ diff --git a/docs/continuous-delivery/x-platform-cd-features/services/static/disable-artifact.png b/docs/continuous-delivery/x-platform-cd-features/services/static/disable-artifact.png new file mode 100644 index 00000000000..c351257631b Binary files /dev/null and b/docs/continuous-delivery/x-platform-cd-features/services/static/disable-artifact.png differ diff --git a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-intelligence.md b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-intelligence.md index 94d411cc3de..58f81663913 100644 --- a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-intelligence.md +++ b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-intelligence.md @@ -11,6 +11,11 @@ Build Intelligence is part of [Harness CI Intelligence](/docs/continuous-integra Build Intelligence is currently available for **Gradle** and **Bazel** build tools, with Maven support coming soon. Regardless of the programming language used in your projects, as long as you're building with a supported build tool, you can leverage Build Intelligence to optimize your builds. +:::info +Build Intelligence is now Generally Available (GA). +If this feature is not yet enabled in your account, please reach out to [Harness Support](mailto:support@harness.io) for assistance. +::: + ## Using Build Intelligence Build Intelligence seamlessly integrates into your workflow without requiring changes to your build commands. Harness automatically detects supported build tools in your pipeline and injects the necessary configurations into the relevant files within the build workspace. This ensures Build Intelligence optimizes your builds during Gradle or Bazel operations performed in `Test` or `Run` steps. @@ -53,11 +58,15 @@ Below is an example of a CI stage using Build Intelligence: :::info -- Build Intelligence feature is behind the feature flags `CI_CACHE_ENABLED` and `CI_ENABLE_BUILD_CACHE_HOSTED_VM`. - -Contact [Harness Support](mailto:support@harness.io) to enable the feature. +- If this feature is not yet enabled in your account, please reach out to [Harness Support](mailto:support@harness.io) to enable the feature flags `CI_CACHE_ENABLED` and `CI_ENABLE_BUILD_CACHE_HOSTED_VM`. ::: +The cache storage limit depends on your subscription plan type. Please visit the [Subscriptions and licenses](/docs/continuous-integration/get-started/ci-subscription-mgmt.md#usage-limits) page to learn more about usage limits. + +Harness doesn't limit the number of caches you can store, but, once you reach your storage limit, Harness continues to save new caches by automatically evicting old caches. + +The cache retention window is 15 days, which resets whenever a cache is updated. +
@@ -67,9 +76,10 @@ Contact [Harness Support](mailto:support@harness.io) to enable the feature. ::: :::info - Build Intelligence is only supported for Kubernetes on self-hosted build infrastructure. - To use Build Intelligence with self-hosted builds, the following feature flags need to be enabled: - `CI_CACHE_ENABLED` and `CI_ENABLE_BUILD_CACHE_K8` To authenticate to your S3 bucket using OIDC enabled `PL_GCP_OIDC_AUTHENTICATION` for GCP or `CDS_AWS_OIDC_AUTHENTICATION` for AWS. + `CI_CACHE_ENABLED` and `CI_ENABLE_BUILD_CACHE_K8` + - To authenticate to your S3 bucket using OIDC, feature flags `PL_GCP_OIDC_AUTHENTICATION` for GCP or `CDS_AWS_OIDC_AUTHENTICATION` for AWS are required. - Contact [Harness Support](mailto:support@harness.io) to enable the feature. + Contact [Harness Support](mailto:support@harness.io) to enable the feature if it is not already available in your account. ::: - When using Build Intelligence with self-hosted infrastructure, an S3-compatible bucket is required for cache storage. Please visit [configure default S3-compatible object storage](/docs/platform/settings/default-settings.md#continuous-integration) for more information. diff --git a/docs/continuous-integration/use-ci/caching-ci-data/cache-intelligence.md b/docs/continuous-integration/use-ci/caching-ci-data/cache-intelligence.md index e87e85ce724..dd9a32c1dca 100644 --- a/docs/continuous-integration/use-ci/caching-ci-data/cache-intelligence.md +++ b/docs/continuous-integration/use-ci/caching-ci-data/cache-intelligence.md @@ -15,7 +15,8 @@ With **Cache Intelligence**, a [Harness CI Intelligence](/docs/continuous-integra You can use Cache Intelligence with any [build infrastructure](/docs/continuous-integration/use-ci/set-up-build-infrastructure/which-build-infrastructure-is-right-for-me.md). :::note -Cache intelligence for self-managed build infrastructure is an early access feature and is behind the feature flag `CI_ENABLE_CACHE_INTEL_SELF_HOSTED`. Contact [Harness Support](mailto:support@harness.io) to enable the feature. +Cache Intelligence is now Generally Available (GA). +If this feature is not yet enabled in your account, please reach out to [Harness Support](mailto:support@harness.io) for assistance. ::: @@ -52,11 +53,7 @@ When you use Cache Intelligence with [Harness CI Cloud](/docs/continuous-integra All pipelines in the account use the same cache storage, and each build tool has a unique cache key that is used to restore the appropriate cache data at runtime. -The cache storage limit depends on your subscription plan type: - -* Free: 2 GB -* Team: 5 GB -* Enterprise: 10 GB +The cache storage limit depends on your subscription plan type. Please visit the [Subscriptions and licenses](/docs/continuous-integration/get-started/ci-subscription-mgmt.md#usage-limits) page to learn more about usage limits. Harness doesn't limit the number of caches you can store, but, once you reach your storage limit, Harness continues to save new caches by automatically evicting old caches. 
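+For illustration, here is a minimal sketch of a Build stage with Cache Intelligence enabled through the stage-level `caching` setting; the step details are placeholders:
+
+```yaml
+- stage:
+    name: Build
+    identifier: Build
+    type: CI
+    spec:
+      cloneCodebase: true
+      caching:
+        enabled: true # turns on Cache Intelligence for this stage
+      execution:
+        steps:
+          - step:
+              type: Run
+              name: Build
+              identifier: build
+              spec:
+                shell: Sh
+                command: ./gradlew build # placeholder build command
+```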
diff --git a/docs/continuous-integration/use-ci/caching-ci-data/docker-layer-caching.md b/docs/continuous-integration/use-ci/caching-ci-data/docker-layer-caching.md index 31d9aa640f8..c066f7617f9 100644 --- a/docs/continuous-integration/use-ci/caching-ci-data/docker-layer-caching.md +++ b/docs/continuous-integration/use-ci/caching-ci-data/docker-layer-caching.md @@ -20,8 +20,11 @@ With **Docker Layer Caching (DLC)** , a [Harness CI Intelligence](/docs/continuo You can use DLC with any [build infrastructure](/docs/continuous-integration/use-ci/set-up-build-infrastructure/which-build-infrastructure-is-right-for-me.md). When you use DLC with Harness CI Cloud, the cache is stored in the Harness-managed environment. -:::note -Docker Layer Caching for self-managed build infrastructure is an early access feature and is behind the feature flag `CI_ENABLE_DLC_SELF_HOSTED`. Contact [Harness Support](mailto:support@harness.io) to enable the feature. +:::info + +Docker Layer Caching is now Generally Available (GA). + +If this feature is not yet enabled in your account, please reach out to [Harness Support](mailto:support@harness.io) for assistance. ::: @@ -37,11 +40,7 @@ When you use Docker Layer Caching with [Harness CI Cloud](/docs/continuous-integ All pipelines in the account use the same cache storage, and each build tool has a unique cache key that is used to restore the appropriate cache data at runtime. -The cache storage limit depends on your subscription plan type: - -* Free: 2 GB -* Team: 5 GB -* Enterprise: 10 GB +The cache storage limit depends on your subscription plan type. Please visit the [Subscriptions and licenses](/docs/continuous-integration/get-started/ci-subscription-mgmt.md#usage-limits) page to learn more about usage limits. Harness doesn't limit the number of caches you can store, but, once you reach your storage limit, Harness continues to save new caches by automatically evicting old caches. @@ -57,6 +56,11 @@ If your storage isn't S3-compatible or you don't want to use access key and sec We suggest that you consider setting a bucket-level retention policy for efficient cache management. +:::info +Enabling DLC when running on Kubernetes requires *privileged mode* on the cluster where the builds run. +::: + +
@@ -84,9 +88,6 @@ Here is a YAML example of a **Build and Push an image to Docker Registry** step - <+pipeline.sequenceId> ``` -:::info -Enabling DLC when running on Kubernetes requires *privileged mode* on the cluster where the builds run. -::: ## Remote cache image diff --git a/docs/continuous-integration/use-ci/codebase-configuration/create-and-configure-a-codebase.md b/docs/continuous-integration/use-ci/codebase-configuration/create-and-configure-a-codebase.md index ac909e109e6..84bd1e9a1ee 100644 --- a/docs/continuous-integration/use-ci/codebase-configuration/create-and-configure-a-codebase.md +++ b/docs/continuous-integration/use-ci/codebase-configuration/create-and-configure-a-codebase.md @@ -104,9 +104,9 @@ For more information about Build stage settings, go to [CI Build stage settings] ## Edit the default codebase configuration :::note +The Git Clone enhancements listed below are now Generally Available (GA). -We've recently enhanced the Git clone operations within Harness, in both the Git Clone step and the native Clone Codebase functionality. Support was added for : - +Harness has introduced several powerful enhancements to Git clone operations, available in both the Git Clone step and the native Clone Codebase functionality. These include: - Git LFS - Allows users to clone repositories with large file storage (LFS) efficiently. - Fetch Tags - Enables fetching of tags during the clone operation. @@ -115,9 +115,7 @@ We've recently enhanced the Git clone operations within Harness, in both the Git - Clone Path Customization - Exposes the clone path in the codebase section, allowing users to specify a custom clone directory. - Additional Pre-Fetch Command - Ability to specify any additional Git commands to run before fetching the code. - -These capabilites are behind feature flag `CI_GIT_CLONE_ENHANCED`. If it is not available in your account, contact [Harness Support](mailto:support@harness.io) to enable the feature. - +If these capabilities are not yet enabled in your account, please reach out to [Harness Support](mailto:support@harness.io) for assistance. ::: import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; diff --git a/docs/continuous-integration/use-ci/codebase-configuration/git-clone-step.md b/docs/continuous-integration/use-ci/codebase-configuration/git-clone-step.md index 59666aa0bed..175530db4dd 100644 --- a/docs/continuous-integration/use-ci/codebase-configuration/git-clone-step.md +++ b/docs/continuous-integration/use-ci/codebase-configuration/git-clone-step.md @@ -14,9 +14,9 @@ This topic describes how to use the **Git Clone** step included in Harness Conti For example, assume the default codebase is a repo that contains app code files, and the Dockerfile necessary to build the app image is in a different repo. You can use a **Git Clone** or **Run** step to clone the second repo into the workspace. Then, you can use a **Build and Push** step to build and push an image using files from both repos. :::note +The Git Clone enhancements listed below are now Generally Available (GA). -We've recently enhanced the Git clone operations within Harness, in both the Git Clone step and the native Clone Codebase functionality. Support was added for : - +Harness has introduced several powerful enhancements to Git clone operations, available in both the Git Clone step and the native Clone Codebase functionality. These include: - Git LFS - Allows users to clone repositories with large file storage (LFS) efficiently. - Fetch Tags - Enables fetching of tags during the clone operation. 
@@ -25,9 +25,7 @@ We've recently enhanced the Git clone operations within Harness, in both the Git - Clone Path Customization - Exposes the clone path in the codebase section, allowing users to specify a custom clone directory. - Additional Pre-Fetch Command - Ability to specify any additional Git commands to run before fetching the code. - -These capabilites are behind feature flag `CI_GIT_CLONE_ENHANCED`. If it is not available in your account, contact [Harness Support](mailto:support@harness.io) to enable the feature. - +If these capabilities are not yet enabled in your account, please reach out to [Harness Support](mailto:support@harness.io) for assistance. ::: Add a **Git Clone** step to clone a second repo into the pipeline's workspace. diff --git a/docs/continuous-integration/use-ci/run-tests/tests-v2.md b/docs/continuous-integration/use-ci/run-tests/tests-v2.md index 8c793c21ee5..384e9ba1b13 100644 --- a/docs/continuous-integration/use-ci/run-tests/tests-v2.md +++ b/docs/continuous-integration/use-ci/run-tests/tests-v2.md @@ -12,14 +12,12 @@ import TabItem from '@theme/TabItem'; You can use this **Test Intelligence** step, also known as the **Test** step, to run unit tests with **Python**, **Ruby**, **Java**, **C#**, **Scala**, and **Kotlin** programming languages. -:::note - -Currently, the Test step is behind the feature flag `CIE_ENABLE_RUNTEST_V2`. If the **Test** step is not available in your account, contact [Harness Support](mailto:support@harness.io) to enable the feature. - -C# (.Net Core 6.0+) support is also in early access and would require a specific environment variable to be used. If you wish to use the .Net test selection, please contact [Harness Support](mailto:support@harness.io) to enable it within your pipeline. - +:::info +Test Intelligence is now Generally Available (GA). +If this feature is not yet enabled in your account, please reach out to [Harness Support](mailto:support@harness.io) for assistance. ::: + :::info To use TI for Python, your codebase must be Python 3. @@ -53,7 +51,7 @@ Add the **Test** step to the [Build stage](/docs/continuous-integration/use-ci/s ### Metadata -* **Name:** Enter a name summarizing the step's purpose. Harness automatically assigns an [ID](/docs/platform/references/entity-identifier-reference.md) based on the **Name**. +* **Name:** Enter a name summarizing the step's purpose. Harness automatically assigns an ID based on the **Name**. * **Description:** Optional text string describing the step's purpose. ### Container Registry and Image diff --git a/docs/continuous-integration/use-ci/run-tests/ti-overview.md b/docs/continuous-integration/use-ci/run-tests/ti-overview.md index 2a882e57f40..9b610cc6ef9 100644 --- a/docs/continuous-integration/use-ci/run-tests/ti-overview.md +++ b/docs/continuous-integration/use-ci/run-tests/ti-overview.md @@ -24,8 +24,10 @@ Harness Test Intelligence (TI) improves unit test time by running only the unit You can use Test Intelligence with **Python**, **Java**, **Ruby**, **C#**, **Kotlin**, or **Scala** programming languages. :::info +- Test Intelligence is now Generally Available (GA). +If this feature is not yet enabled in your account, please reach out to [Harness Support](mailto:support@harness.io) for assistance. -Test Intelligence applies to unit testing only. For other types of tests, [use Run steps](../run-step-settings.md). +- Test Intelligence applies to unit testing only. For other types of tests, [use Run steps](../run-step-settings.md). 
::: @@ -59,7 +61,12 @@ Test Intelligence is comprised of a TI service, a Test Runner Agent, and the **T ## Enable Test Intelligence (TI) -We recommend using [ **Test**](./tests-v2.md) Step (Test Intelligence v2) to run your tests. This is a newer, simplified version of Test Intelligence, where you do not need to change your test commands - Test Intelligence will be automatically configured for you in runtime, making it easier to use. +:::tip + +We recommend using the [**Test**](./tests-v2.md) step (Test Intelligence v2) to run your tests. This is a newer, simplified version of Test Intelligence, where you do not need to change your test commands - Test Intelligence will be automatically configured for you at runtime, making it easier to use. + +::: + For instructions on using Test Intelligence (v2), go to [Test Intelligence step](./tests-v2.md). diff --git a/docs/continuous-integration/use-ci/set-up-build-infrastructure/define-a-docker-build-infrastructure.md b/docs/continuous-integration/use-ci/set-up-build-infrastructure/define-a-docker-build-infrastructure.md index 9e9fa6cf334..ea4735c3ccd 100644 --- a/docs/continuous-integration/use-ci/set-up-build-infrastructure/define-a-docker-build-infrastructure.md +++ b/docs/continuous-integration/use-ci/set-up-build-infrastructure/define-a-docker-build-infrastructure.md @@ -108,7 +108,7 @@ The Harness Docker Runner service performs the build work. The delegate needs th 4. Start the runner binary, for example: ``` - sudo ./harness-docker-runner-linux-arm64 server + sudo -E ./harness-docker-runner-linux-arm64 server ``` ### Define build infrastructure @@ -176,7 +176,7 @@ For more information about delegates and delegate installation, go to [Delegate The Harness Docker Runner service performs the build work. The delegate needs the runner to run CI builds. 1. Download a [Harness Docker Runner executable](https://github.com/harness/harness-docker-runner/releases) corresponding to your build farm's OS and architecture. -2. (Optional) To use self-signed certificates, export `CI_MOUNT_VOLUMES` along with a comma-separated list of source paths and destination paths formatted as `path/to/source:path/to/destination`: +2. (Optional) To use self-signed certificates, export `CI_MOUNT_VOLUMES` along with a comma-separated list of source paths and destination paths formatted as `path/to/source;path/to/destination`: ``` export CI_MOUNT_VOLUMES="path/to/local/cert;/etc/ssl/certs/ca-certificates.crt,path/to/local/cert2;/etc/ssl/certs/cacerts.pem" @@ -197,7 +197,7 @@ The Harness Docker Runner service performs the build work. The delegate needs th 4. Start the runner binary, for example: ``` - ./harness-docker-runner-darwin-amd64 server + sudo -E ./harness-docker-runner-darwin-amd64 server ``` 5. If [macOS Gatekeeper](https://support.apple.com/en-us/HT202491) stops the installation because it can't check for malicious software, you need to modify **Security & Privacy** settings to allow this app to run. @@ -304,7 +304,7 @@ Install the Harness Docker Runner on the Windows machine that you specified in t 1. Download the Windows [Harness Docker Runner executable](https://github.com/harness/harness-docker-runner/releases) on the Windows machine where you want to run the Harness Docker Runner. This should be the Windows machine that you specified in the delegate's `RUNNER_URL`. 2. Use PowerShell to run these commands. Open a terminal with Administrator privileges. -3. 
(Optional) To use self-signed certificates, set `CI_MOUNT_VOLUMES` along with a comma-separated list of source paths and destination paths formatted as `path/to/source:path/to/destination`, for example: +3. (Optional) To use self-signed certificates, set `CI_MOUNT_VOLUMES` along with a comma-separated list of source paths and destination paths formatted as `path/to/source;path/to/destination`, for example: ``` $env:CI_MOUNT_VOLUMES="C:\Users\installer\Downloads\certs;C:/Users/ContainerAdministrator/.jfrog/security/certs" diff --git a/docs/continuous-integration/use-ci/set-up-build-infrastructure/use-harness-cloud-build-infrastructure.md b/docs/continuous-integration/use-ci/set-up-build-infrastructure/use-harness-cloud-build-infrastructure.md index 8cf51bd5940..a2ad0ecc11f 100644 --- a/docs/continuous-integration/use-ci/set-up-build-infrastructure/use-harness-cloud-build-infrastructure.md +++ b/docs/continuous-integration/use-ci/set-up-build-infrastructure/use-harness-cloud-build-infrastructure.md @@ -151,9 +151,9 @@ Currently, macOS platforms for Harness Cloud are behind a feature flag with limi ### Using Resource Classes You can use the YAML editor to change the cloud machine size. -:::info - -Currently, specifying machine size is behind the feature flag CI_ENABLE_RESOURCE_CLASSES. You can [submit a request to enable the feature](https://forms.gle/CWCcuE3nxqEdFJcZ6). +:::note +Resource Classes support is now Generally Available (GA). +If this feature is not yet enabled in your account, please reach out to [Harness Support](mailto:support@harness.io) for assistance. ::: To select a resource class size, please set the desired size as the value of the `size` property in the CI stage cloud infrastructure runtime configuration. For example: diff --git a/docs/continuous-integration/use-ci/set-up-build-infrastructure/vm-build-infrastructure/set-up-an-aws-vm-build-infrastructure.md b/docs/continuous-integration/use-ci/set-up-build-infrastructure/vm-build-infrastructure/set-up-an-aws-vm-build-infrastructure.md index 81e99fda49e..d9479890b3b 100644 --- a/docs/continuous-integration/use-ci/set-up-build-infrastructure/vm-build-infrastructure/set-up-an-aws-vm-build-infrastructure.md +++ b/docs/continuous-integration/use-ci/set-up-build-infrastructure/vm-build-infrastructure/set-up-an-aws-vm-build-infrastructure.md @@ -13,14 +13,20 @@ import TabItem from '@theme/TabItem'; -:::note +:::warning -Currently, this feature is behind the Feature Flag `CI_VM_INFRASTRUCTURE`. Contact [Harness Support](mailto:support@harness.io) to enable the feature. +This feature will be deprecated on May 1, 2025, and replaced with an improved VM cluster manager. If you have any questions, please contact your account representative or [Harness Support](mailto:support@harness.io). ::: This topic describes how to use AWS VMs as Harness CI build infrastructure. To do this, you will create an Ubuntu VM and install a Harness Delegate and Drone VM Runner on it. The runner creates VMs dynamically in response to CI build requests. You can also configure the runner to hibernate AWS Linux and Windows VMs when they aren't needed. +:::note + +Currently, this feature is behind the Feature Flag `CI_VM_INFRASTRUCTURE`. Contact [Harness Support](mailto:support@harness.io) to enable the feature. + +::: + This is one of several CI build infrastructure options. For example, you can also [set up a Kubernetes cluster build infrastructure](../k8s-build-infrastructure/set-up-a-kubernetes-cluster-build-infrastructure.md). 
The following diagram illustrates a CI build farm using AWS VMs. The [Harness Delegate](/docs/platform/delegates/delegate-concepts/delegate-overview) communicates directly with your Harness instance. The [VM runner](https://docs.drone.io/runner/vm/overview/) maintains a pool of VMs for running builds. When the delegate receives a build request, it forwards the request to the runner, which runs the build on an available VM. @@ -31,15 +37,15 @@ The following diagram illustrates a CI build farm using AWS VMs. The [Harness De This is an advanced configuration. Before beginning, you should be familiar with: -* Using the AWS EC2 console and interacting with AWS VMs. -* [Harness key concepts](/docs/platform/get-started/key-concepts.md) -* [CI pipeline creation](../../prep-ci-pipeline-components.md) -* [Harness Delegates](/docs/platform/delegates/delegate-concepts/delegate-overview) -* Drone VM Runners and pools: - * [Drone documentation - VM runner overview](https://docs.drone.io/runner/vm/overview/) - * [Drone documentation - Drone Pool](https://docs.drone.io/runner/vm/configuration/pool/) - * [Drone documentation - Amazon drivers](https://docs.drone.io/runner/vm/drivers/amazon/) - * [GitHub repository - Drone runner AWS](https://github.com/drone-runners/drone-runner-aws) +- Using the AWS EC2 console and interacting with AWS VMs. +- [Harness key concepts](/docs/platform/get-started/key-concepts.md) +- [CI pipeline creation](../../prep-ci-pipeline-components.md) +- [Harness Delegates](/docs/platform/delegates/delegate-concepts/delegate-overview) +- Drone VM Runners and pools: + - [Drone documentation - VM runner overview](https://docs.drone.io/runner/vm/overview/) + - [Drone documentation - Drone Pool](https://docs.drone.io/runner/vm/configuration/pool/) + - [Drone documentation - Amazon drivers](https://docs.drone.io/runner/vm/drivers/amazon/) + - [GitHub repository - Drone runner AWS](https://github.com/drone-runners/drone-runner-aws) ::: @@ -110,13 +116,13 @@ You may need to add the runner spec to the delegate definition: 1. Append the following to the end of the `docker-compose.yaml` file: ```yaml - drone-runner-aws: - restart: unless-stopped + drone-runner-aws: + restart: unless-stopped image: drone/drone-runner-aws - network_mode: "host" - volumes: - - /runner:/runner - entrypoint: ["/bin/drone-runner-aws", "delegate", "--pool", "pool.yml"] + network_mode: "host" + volumes: + - /runner:/runner + entrypoint: ["/bin/drone-runner-aws", "delegate", "--pool", "pool.yml"] working_dir: /runner ``` @@ -170,7 +176,7 @@ instances: iam_profile_arn: arn:aws:iam::XXXX:instance-profile/XXXXX network: security_groups: - - sg-XXXXXXXXXXX + - sg-XXXXXXXXXXX - name: windows-ci-pool ## The settings nested below this define the Windows pool. default: true type: amazon @@ -190,68 +196,68 @@ instances: hibernate: true network: security_groups: - - sg-XXXXXXXXXXXXXX + - sg-XXXXXXXXXXXXXX ``` ### Pool settings reference You can configure the following settings in your `pool.yml` file. You can also learn more in the Drone documentation for the [Pool File](https://docs.drone.io/runner/vm/configuration/pool/) and [Amazon drivers](https://docs.drone.io/runner/vm/drivers/amazon/). -| Setting | Type | Example | Description | -| ------- | ---- | ------- | ----------- | -| `name` | String | `name: windows_pool` | Unique identifier of the pool. You will need to specify this pool name in Harness when you [set up the CI stage build infrastructure](#specify-build-infrastructure). 
| -| `pool` | Integer | `pool: 1` | Warm pool size number. Denotes the number of VMs in ready state to be used by the runner. | -| `limit` | Integer | `limit: 3` | Maximum number of VMs the runner can create at any time. `pool` indicates the number of warm VMs, and the runner can create more VMs on demand up to the `limit`.
For example, assume `pool: 3` and `limit: 10`. If the runner gets a request for 5 VMs, it immediately provisions the 3 warm VMs (from `pool`) and provisions 2 more, which are not warm and take time to initialize. | -| `platform` | Key-value pairs, strings | Go to [platform example](#platform-example). | Specify VM platform operating system (`os: linux` or `os: windows`). `arch` and `variant` are optional. `os_name: amazon-linux` is required for AL2 AMIs. The default configuration is `os: linux` and `arch: amd64`. | -| `spec` | Key-value pairs, various | Go to [Example pool.yml](#example-poolyml) and the examples in the following rows. | Configure settings for the build VMs and AWS instance. Contains a series of individual and mapped settings, including `account`, `tags`, `ami`, `size`, `hibernate`, `iam_profile_arn`, `network`, `user_data`, `user_data_path`, and `disk`. Details about these settings are provided below. | -| `account` | Key-value pairs, strings | Go to [account example](#account-example). | AWS account configuration, including region and access key authentication.
  • `region`: AWS region. To minimize latency, use the same region as the delegate VM.
  • `availability_zone`: AWS region availability zone. To minimize latency, use the same availability zone as the delegate VM.
  • `access_key_id`: The AWS access key for authentication. If using an IAM role, this is the access key associated with the IAM role.
  • `access_key_secret`: The secret associated with the specified `access_key_id`.
  • `key_pair_name`: The key pair name specified when you set up the EC2 instance. Don't include `.pem`.
| -| `tags` | Key-vale pairs, strings | Go to [tags example](#tags-example). | Optional tags to apply to the instance. | -| `ami` | String | `ami: ami-092f63f22143765a3` | The AMI ID. You can use the same AMI as your EC2 instance or [search for AMIs](https://cloud-images.ubuntu.com/locator/ec2/) in your Availability Zone for supported models (Ubuntu, AWS Linux, Windows 2019+). AMI IDs differ by Availability Zone. | -| `size` | String | `size: t3.large` | The AMI size, such as `t2.nano`, `t2.micro`, `m4.large`, and so on. Make sure the size is large enough to handle your builds. | -| `hibernate` | Boolean | `hibernate: true` | When set to `true` (which is the default), VMs hibernate after startup. When `false`, VMs are always in a running state. This option is supported for AWS Linux and Windows VMs. Hibernation for Ubuntu VMs is not currently supported. For more information, go to the AWS documentation on [hibernating on-demand Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html). | -| `iam_profile_arn` | String | `iam_profile_arn: arn:aws:iam::XXXX:instance-profile/XXX` | If using IAM roles, this is the instance profile ARN of the IAM role to apply to the build instances. | -| `network` | Key-value pairs, various | Go to [network example](#network-example). | AWS network information, including security groups. For more information on these attributes, go to the AWS documentation on [creating security groups](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/get-set-up-for-amazon-ec2.html#create-a-base-security-group).
  • `security_groups`: List of security group IDs as strings.
  • `vpc`: If using VPC, this is the VPC ID as an integer.
  • `vpc_security_groups`: If using VPC, this is a list of VPC security group IDs as strings.
  • `private_ip`: Boolean.
  • `subnet_id`: The subnet ID as a string.
| -| `user_data` or `user_data_path` | Key-value pairs, strings | Go to [user data example](#user-data-example). | Define custom user data to apply to the instance. Provide [cloud-init data](https://docs.drone.io/runner/vm/configuration/cloud-init/) either directly in `user_data` or as a path to a file in `user_data_path`. | -| `disk` | Key-value pairs, various | Go to [disk example](#disk-example). | Optional AWS block information.
  • `size`: Integer, size in GB.
  • `type`: `gp2`, `io1`, or `standard`.
  • `iops`: If `type: io1`, then `iops: iops`.
| +| Setting | Type | Example | Description | +| ------------------------------- | ------------------------ | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | String | `name: windows_pool` | Unique identifier of the pool. You will need to specify this pool name in Harness when you [set up the CI stage build infrastructure](#specify-build-infrastructure). | +| `pool` | Integer | `pool: 1` | Warm pool size number. Denotes the number of VMs in ready state to be used by the runner. | +| `limit` | Integer | `limit: 3` | Maximum number of VMs the runner can create at any time. `pool` indicates the number of warm VMs, and the runner can create more VMs on demand up to the `limit`.
For example, assume `pool: 3` and `limit: 10`. If the runner gets a request for 5 VMs, it immediately provisions the 3 warm VMs (from `pool`) and provisions 2 more, which are not warm and take time to initialize. | +| `platform` | Key-value pairs, strings | Go to [platform example](#platform-example). | Specify VM platform operating system (`os: linux` or `os: windows`). `arch` and `variant` are optional. `os_name: amazon-linux` is required for AL2 AMIs. The default configuration is `os: linux` and `arch: amd64`. | +| `spec` | Key-value pairs, various | Go to [Example pool.yml](#example-poolyml) and the examples in the following rows. | Configure settings for the build VMs and AWS instance. Contains a series of individual and mapped settings, including `account`, `tags`, `ami`, `size`, `hibernate`, `iam_profile_arn`, `network`, `user_data`, `user_data_path`, and `disk`. Details about these settings are provided below. | +| `account` | Key-value pairs, strings | Go to [account example](#account-example). | AWS account configuration, including region and access key authentication.
  • `region`: AWS region. To minimize latency, use the same region as the delegate VM.
  • `availability_zone`: AWS region availability zone. To minimize latency, use the same availability zone as the delegate VM.
  • `access_key_id`: The AWS access key for authentication. If using an IAM role, this is the access key associated with the IAM role.
  • `access_key_secret`: The secret associated with the specified `access_key_id`.
  • `key_pair_name`: The key pair name specified when you set up the EC2 instance. Don't include `.pem`.
| +| `tags` | Key-value pairs, strings | Go to [tags example](#tags-example). | Optional tags to apply to the instance. | +| `ami` | String | `ami: ami-092f63f22143765a3` | The AMI ID. You can use the same AMI as your EC2 instance or [search for AMIs](https://cloud-images.ubuntu.com/locator/ec2/) in your Availability Zone for supported models (Ubuntu, AWS Linux, Windows 2019+). AMI IDs differ by Availability Zone. | +| `size` | String | `size: t3.large` | The AMI size, such as `t2.nano`, `t2.micro`, `m4.large`, and so on. Make sure the size is large enough to handle your builds. | +| `hibernate` | Boolean | `hibernate: true` | When set to `true` (which is the default), VMs hibernate after startup. When `false`, VMs are always in a running state. This option is supported for AWS Linux and Windows VMs. Hibernation for Ubuntu VMs is not currently supported. For more information, go to the AWS documentation on [hibernating on-demand Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html). | +| `iam_profile_arn` | String | `iam_profile_arn: arn:aws:iam::XXXX:instance-profile/XXX` | If using IAM roles, this is the instance profile ARN of the IAM role to apply to the build instances. | +| `network` | Key-value pairs, various | Go to [network example](#network-example). | AWS network information, including security groups. For more information on these attributes, go to the AWS documentation on [creating security groups](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/get-set-up-for-amazon-ec2.html#create-a-base-security-group).
  • `security_groups`: List of security group IDs as strings.
  • `vpc`: If using VPC, this is the VPC ID as an integer.
  • `vpc_security_groups`: If using VPC, this is a list of VPC security group IDs as strings.
  • `private_ip`: Boolean.
  • `subnet_id`: The subnet ID as a string.
| +| `user_data` or `user_data_path` | Key-value pairs, strings | Go to [user data example](#user-data-example). | Define custom user data to apply to the instance. Provide [cloud-init data](https://docs.drone.io/runner/vm/configuration/cloud-init/) either directly in `user_data` or as a path to a file in `user_data_path`. | +| `disk` | Key-value pairs, various | Go to [disk example](#disk-example). | Optional AWS block information.
  • `size`: Integer, size in GB.
  • `type`: `gp2`, `io1`, or `standard`.
  • `iops`: If `type: io1`, then `iops: iops`.
| #### platform example ```yaml - instance: - platform: - os: linux - arch: amd64 - version: - os_name: amazon-linux +instance: + platform: + os: linux + arch: amd64 + version: + os_name: amazon-linux ``` #### account example ```yaml - account: - region: us-east-2 - availability_zone: us-east-2c - access_key_id: XXXXX - access_key_secret: XXXXX - key_pair_name: XXXXX +account: + region: us-east-2 + availability_zone: us-east-2c + access_key_id: XXXXX + access_key_secret: XXXXX + key_pair_name: XXXXX ``` #### tags example ```yaml - tags: - owner: USER - ttl: '-1' +tags: + owner: USER + ttl: "-1" ``` #### network example ```yaml - network: - private_ip: true - subnet_id: subnet-XXXXXXXXXX - security_groups: - - sg-XXXXXXXXXXXXXX +network: + private_ip: true + subnet_id: subnet-XXXXXXXXXX + security_groups: + - sg-XXXXXXXXXXXXXX ``` #### user data example @@ -259,48 +265,48 @@ You can configure the following settings in your `pool.yml` file. You can also l Provide [cloud-init data](https://docs.drone.io/runner/vm/configuration/cloud-init/) in either `user_data_path` or `user_data`. ```yaml - user_data_path: /path/to/custom/user-data.yml +user_data_path: /path/to/custom/user-data.yml ``` ```yaml - user_data: | - #cloud-config - apt: - sources: - docker.list: - source: deb [arch={{ .Architecture }}] https://download.docker.com/linux/ubuntu $RELEASE stable - keyid: KEY_TO_IMPORT - packages: - - wget - - docker-ce - write_files: - - path: {{ .CaCertPath }} - permissions: '0600' - encoding: b64 - content: {{ .CACert | base64 }} - - path: {{ .CertPath }} - permissions: '0600' - encoding: b64 - content: {{ .TLSCert | base64 }} - - path: {{ .KeyPath }} - permissions: '0600' - encoding: b64 - content: {{ .TLSKey | base64 }} - runcmd: - - 'wget "{{ .LiteEnginePath }}/lite-engine-{{ .Platform }}-{{ .Architecture }}" -O /usr/bin/lite-engine' - - 'chmod 777 /usr/bin/lite-engine' - - 'touch /root/.env' - - 'touch /tmp/some_directory' - - '/usr/bin/lite-engine server --env-file /root/.env > /var/log/lite-engine.log 2>&1 &' +user_data: | + #cloud-config + apt: + sources: + docker.list: + source: deb [arch={{ .Architecture }}] https://download.docker.com/linux/ubuntu $RELEASE stable + keyid: KEY_TO_IMPORT + packages: + - wget + - docker-ce + write_files: + - path: {{ .CaCertPath }} + permissions: '0600' + encoding: b64 + content: {{ .CACert | base64 }} + - path: {{ .CertPath }} + permissions: '0600' + encoding: b64 + content: {{ .TLSCert | base64 }} + - path: {{ .KeyPath }} + permissions: '0600' + encoding: b64 + content: {{ .TLSKey | base64 }} + runcmd: + - 'wget "{{ .LiteEnginePath }}/lite-engine-{{ .Platform }}-{{ .Architecture }}" -O /usr/bin/lite-engine' + - 'chmod 777 /usr/bin/lite-engine' + - 'touch /root/.env' + - 'touch /tmp/some_directory' + - '/usr/bin/lite-engine server --env-file /root/.env > /var/log/lite-engine.log 2>&1 &' ``` #### disk example ```yaml - disk: - size: 16 - type: io1 - iops: iops +disk: + size: 16 + type: io1 + iops: iops ``` ## Start the runner @@ -366,11 +372,11 @@ For more information about delegates and delegate installation, go to [Delegate 1. Verify that the delegate and runner containers are running correctly. You might need to wait a few minutes for both processes to start. You can run the following commands to check the process status: - ``` - docker ps - docker logs DELEGATE_CONTAINER_ID - docker logs RUNNER_CONTAINER_ID - ``` + ``` + docker ps + docker logs DELEGATE_CONTAINER_ID + docker logs RUNNER_CONTAINER_ID + ``` 2. 
In the Harness UI, verify that the delegate appears in the delegates list. It might take two or three minutes for the Delegates list to update. Make sure the **Connectivity Status** is **Connected**. If the **Connectivity Status** is **Not Connected**, make sure the Docker host can connect to `https://app.harness.io`. @@ -398,7 +404,6 @@ Configure your pipeline's **Build** (`CI`) stage to use your AWS VMs as build in - ```yaml - stage: name: build @@ -461,13 +466,13 @@ With this feature flag enabled, Harness uses your [delegate selectors](/docs/pla Go to the [CI Knowledge Base](/kb/continuous-integration/continuous-integration-faqs) for questions and issues related to self-managed VM build infrastructures, including: -* [Build VM creation fails with no default VPC](/kb/continuous-integration/continuous-integration-faqs/#aws-build-vm-creation-fails-with-no-default-vpc) -* [AWS VM builds stuck at the initialize step on health check](/kb/continuous-integration/continuous-integration-faqs/#aws-vm-builds-stuck-at-the-initialize-step-on-health-check) -* [Delegate connected but builds fail](/kb/continuous-integration/continuous-integration-faqs/#aws-vm-delegate-connected-but-builds-fail) -* [Use internal or custom AMIs](/kb/continuous-integration/continuous-integration-faqs/#use-internal-or-custom-amis-with-self-managed-aws-vm-build-infrastructure) -* [Where can I find self-managed VM lite engine and cloud init output logs?](/kb/continuous-integration/continuous-integration-faqs/#where-can-i-find-logs-for-self-managed-aws-vm-lite-engine-and-cloud-init-output) -* [Can I use the same build VM for multiple CI stages?](/kb/continuous-integration/continuous-integration-faqs/#can-i-use-the-same-build-vm-for-multiple-ci-stages) -* [Why are build VMs running when there are no active builds?](/kb/continuous-integration/continuous-integration-faqs/#why-are-build-vms-running-when-there-are-no-active-builds) -* [How do I specify the disk size for a Windows instance in pool.yml?](/kb/continuous-integration/continuous-integration-faqs/#how-do-i-specify-the-disk-size-for-a-windows-instance-in-poolyml) -* [Clone codebase fails due to missing plugin](/kb/continuous-integration/continuous-integration-faqs/#clone-codebase-fails-due-to-missing-plugin) -* [Can I limit memory and CPU for Run Tests steps running on self-managed VM build infrastructure?](/kb/continuous-integration/continuous-integration-faqs/#can-i-limit-memory-and-cpu-for-run-tests-steps-running-on-harness-cloud) +- [Build VM creation fails with no default VPC](/kb/continuous-integration/continuous-integration-faqs/#aws-build-vm-creation-fails-with-no-default-vpc) +- [AWS VM builds stuck at the initialize step on health check](/kb/continuous-integration/continuous-integration-faqs/#aws-vm-builds-stuck-at-the-initialize-step-on-health-check) +- [Delegate connected but builds fail](/kb/continuous-integration/continuous-integration-faqs/#aws-vm-delegate-connected-but-builds-fail) +- [Use internal or custom AMIs](/kb/continuous-integration/continuous-integration-faqs/#use-internal-or-custom-amis-with-self-managed-aws-vm-build-infrastructure) +- [Where can I find self-managed VM lite engine and cloud init output logs?](/kb/continuous-integration/continuous-integration-faqs/#where-can-i-find-logs-for-self-managed-aws-vm-lite-engine-and-cloud-init-output) +- [Can I use the same build VM for multiple CI stages?](/kb/continuous-integration/continuous-integration-faqs/#can-i-use-the-same-build-vm-for-multiple-ci-stages) +- [Why are build VMs running when there 
are no active builds?](/kb/continuous-integration/continuous-integration-faqs/#why-are-build-vms-running-when-there-are-no-active-builds) +- [How do I specify the disk size for a Windows instance in pool.yml?](/kb/continuous-integration/continuous-integration-faqs/#how-do-i-specify-the-disk-size-for-a-windows-instance-in-poolyml) +- [Clone codebase fails due to missing plugin](/kb/continuous-integration/continuous-integration-faqs/#clone-codebase-fails-due-to-missing-plugin) +- [Can I limit memory and CPU for Run Tests steps running on self-managed VM build infrastructure?](/kb/continuous-integration/continuous-integration-faqs/#can-i-limit-memory-and-cpu-for-run-tests-steps-running-on-harness-cloud) diff --git a/docs/continuous-integration/use-ci/use-drone-plugins/run-a-git-hub-action-in-cie.md b/docs/continuous-integration/use-ci/use-drone-plugins/run-a-git-hub-action-in-cie.md index 3907a41ee04..f0a4d505529 100644 --- a/docs/continuous-integration/use-ci/use-drone-plugins/run-a-git-hub-action-in-cie.md +++ b/docs/continuous-integration/use-ci/use-drone-plugins/run-a-git-hub-action-in-cie.md @@ -10,7 +10,6 @@ helpdocs_is_published: true canonical_url: https://www.harness.io/blog/github-actions --- - import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; @@ -18,8 +17,8 @@ import TabItem from '@theme/TabItem'; There are two ways you can run GitHub Actions in Harness CI pipelines: -* If you are using Harness Cloud build infrastructure, use the [built-in GitHub Action step](./ci-github-action-step.md). -* For all other build infrastructures, use the [GitHub Actions Drone plugin](https://github.com/drone-plugins/github-actions) in a **Plugin** step. When your pipeline runs, the GitHub Actions Drone Plugin runs the GitHub Action in the background using [nektos/act](https://github.com/nektos/act). +- If you are using Harness Cloud build infrastructure, use the [built-in GitHub Action step](./ci-github-action-step.md). +- For all other build infrastructures, use the [GitHub Actions Drone plugin](https://github.com/drone-plugins/github-actions) in a **Plugin** step. When your pipeline runs, the GitHub Actions Drone Plugin runs the GitHub Action in the background using [nektos/act](https://github.com/nektos/act). This topic explains how to use the GitHub Actions Drone plugin in a Plugin step. @@ -39,11 +38,11 @@ You need a [CI pipeline](../prep-ci-pipeline-components.md) with a [Build stage] Use **Settings** to specify the GitHub Action you want to use and to pass variables and attributes required by the Action and the Drone Plugin. You must specify `uses` and `with`. You can use `env` to specify environment variables, such as GitHub tokens to access [private Action repos](#private-action-repos). -| Key | Description | Value format | Value example | -| - | - | - | - | -| `uses` | Required. Specify the Action's repo, along with a branch or tag.| `[repo]@[tag]` | `actions/setup-go@v3` | -| `with` | Required. Provide a map of key-value pairs representing settings required by the GitHub Action itself. | `key: value` | `go-version: '>=1.17.0'` or `{path: pom.xml, destination: cie-demo-pipeline/github-action, credentials: <+stage.variables.GCP_SECRET_KEY_BASE64>}` | -| `env` | Conditionally required. Specify a map of environment variables to pass to the Action. 
Required for [Private Action repos](#private-action-repos), [Duplicate Actions](#duplicate-actions), [Actions requiring a defined working directory](#actions-requiring-a-defined-working-directory), or if otherwise noted in the Action's usage specifications. | `key: value` | `GITHUB_TOKEN: <+secrets.getValue("github_pat")>` | +| Key | Description | Value format | Value example | +| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| `uses` | Required. Specify the Action's repo, along with a branch or tag. | `[repo]@[tag]` | `actions/setup-go@v3` | +| `with` | Required. Provide a map of key-value pairs representing settings required by the GitHub Action itself. | `key: value` | `go-version: '>=1.17.0'` or `{path: pom.xml, destination: cie-demo-pipeline/github-action, credentials: <+stage.variables.GCP_SECRET_KEY_BASE64>}` | +| `env` | Conditionally required. Specify a map of environment variables to pass to the Action. Required for [Private Action repos](#private-action-repos), [Duplicate Actions](#duplicate-actions), [Actions requiring a defined working directory](#actions-requiring-a-defined-working-directory), or if otherwise noted in the Action's usage specifications. | `key: value` | `GITHUB_TOKEN: <+secrets.getValue("github_pat")>` | :::tip @@ -51,64 +50,58 @@ You can use variable expressions for these values. For example, `credentials: <+ ::: - - ![A configured Plugin step with Settings variables.](./static/run-a-git-hub-action-in-cie-03.png) - - ```yaml - - step: - identifier: gcsuploader - name: gcsuploader - type: Plugin - spec: - connectorRef: YOUR_DOCKER_CONNECTOR_ID - image: plugins/github-actions - privileged: true - settings: - uses: google-github-actions/upload-cloud-storage@main # Specify the GitHub Action you want to use. - with: # Specify Action settings - path: pom.xml - destination: cie-demo-pipeline/github-action - credentials: <+stage.variables.GCP_SECRET_KEY_BASE64> ## This example uses a stage variable to store a secret. +- step: + identifier: gcsuploader + name: gcsuploader + type: Plugin + spec: + connectorRef: YOUR_DOCKER_CONNECTOR_ID + image: plugins/github-actions + privileged: true + settings: + uses: google-github-actions/upload-cloud-storage@main # Specify the GitHub Action you want to use. + with: # Specify Action settings + path: pom.xml + destination: cie-demo-pipeline/github-action + credentials: <+stage.variables.GCP_SECRET_KEY_BASE64> ## This example uses a stage variable to store a secret. ``` - - ### Private Action repos If you want to use an Action that is in a private repository, you must add a `GITHUB_TOKEN` environment variable to the **Plugin** step's `settings.env`. You need a [GitHub personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) that has pull permissions to the target repository. Additional permissions may be necessary depending on the Action's purpose. 
Store the token as a [Harness secret](/docs/category/secrets) and use a variable expression, such as `<+secrets.getValue("YOUR_TOKEN_SECRET")>`, to call it. -* Key: `GItHUB_TOKEN` -* Value: `<+secrets.getValue("YOUR_TOKEN_SECRET")>` +- Key: `GITHUB_TOKEN` +- Value: `<+secrets.getValue("YOUR_TOKEN_SECRET")>` Here's an example of the YAML for a `Plugin` step using an Action in a private repo: ```yaml - - step: - type: Plugin - name: private action - identifier: private_action - spec: - connectorRef: dockerhub - image: plugins/github-actions - privileged: true - settings: - uses: myorg/private-action-step@v1 - with: - path: pom.xml - env: - GITHUB_TOKEN: <+secrets.getValue("github_pat")> +- step: + type: Plugin + name: private action + identifier: private_action + spec: + connectorRef: dockerhub + image: plugins/github-actions + privileged: true + settings: + uses: myorg/private-action-step@v1 + with: + path: pom.xml + env: + GITHUB_TOKEN: <+secrets.getValue("github_pat")> ``` ### Duplicate Actions @@ -155,28 +148,64 @@ In this example, two parallel `Plugin` steps run the same GitHub Action. Each st XDG_CACHE_HOME: /home/ubuntu/.cache2 ``` -### Actions requiring a defined working directory +### Output Variables from GitHub Actions Drone Plugin Step -Some GitHub Actions need to run on the cloned [codebase](/docs/continuous-integration/use-ci/codebase-configuration/create-and-configure-a-codebase). The GitHub Action plugin doesn't automatically set a working directory. +When using the GitHub Actions Drone Plugin step in Harness CI, you can output variables from steps in your workflow. This feature allows seamless passing of values between steps, enabling complex pipelines and dynamic workflows. -If this is required by the Action you want to run, and the Action offers a working directory parameter, then you need to specify the working directory as `/harness`. For example: +Here's an example pipeline that demonstrates how to use the GitHub Actions Drone Plugin step in Harness CI to output variables and reference them in subsequent steps: -```yaml +```YAML + execution: + steps: - step: + identifier: gha_plugin type: Plugin - name: Action docker publish image - identifier: Action_docker_publish_image + name: gha_plugin spec: connectorRef: account.harnessImage - image: plugins/github-actions - privileged: true + image: plugins/github-actions:1.0.0 settings: - uses: elgohr/Publish-Docker-Github-Action@v4 + uses: Ompragash/maths-action@main with: - name: dockerhub/publish-docker-image - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - workdir: /harness + input1: "15" + input2: "25" + imagePullPolicy: Always + - step: + identifier: Run_1 + type: Run + name: Run_1 + spec: + connectorRef: account.harnessImage + image: alpine + shell: Sh + command: |- + echo <+execution.steps.gha_plugin.output.outputVariables.sum> + echo <+execution.steps.gha_plugin.output.outputVariables.product> + echo <+execution.steps.gha_plugin.output.outputVariables.message> +``` + +### Actions requiring a defined working directory + +Some GitHub Actions need to run on the cloned [codebase](/docs/continuous-integration/use-ci/codebase-configuration/create-and-configure-a-codebase). The GitHub Action plugin doesn't automatically set a working directory. + +If this is required by the Action you want to run, and the Action offers a working directory parameter, then you need to specify the working directory as `/harness`. 
For example: + +```yaml +- step: + type: Plugin + name: Action docker publish image + identifier: Action_docker_publish_image + spec: + connectorRef: account.harnessImage + image: plugins/github-actions + privileged: true + settings: + uses: elgohr/Publish-Docker-Github-Action@v4 + with: + name: dockerhub/publish-docker-image + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + workdir: /harness ``` If the Action ingests the working directory as an environment variable, place it under `env`. @@ -257,6 +286,6 @@ When you run the pipeline, you can observe the GitHub Action plugin logs in the Go to the [CI Knowledge Base](/kb/continuous-integration/continuous-integration-faqs) for questions and issue related to plugins and integrations, including GitHub Actions. For example: -* [Can't connect to Docker daemon](/kb/continuous-integration/continuous-integration-faqs/#github-action-step-cant-connect-to-docker-daemon) -* [Not a git repository (or any of the parent directories)](/kb/continuous-integration/continuous-integration-faqs/#github-action-step-fails-with-not-a-git-repository-or-any-of-the-parent-directories) -* [PATH variable overwritten in parallel GitHub Action steps](/kb/continuous-integration/continuous-integration-faqs/#why-is-the-path-variable-overwritten-in-parallel-github-actions-steps) +- [Can't connect to Docker daemon](/kb/continuous-integration/continuous-integration-faqs/#github-action-step-cant-connect-to-docker-daemon) +- [Not a git repository (or any of the parent directories)](/kb/continuous-integration/continuous-integration-faqs/#github-action-step-fails-with-not-a-git-repository-or-any-of-the-parent-directories) +- [PATH variable overwritten in parallel GitHub Action steps](/kb/continuous-integration/continuous-integration-faqs/#why-is-the-path-variable-overwritten-in-parallel-github-actions-steps) diff --git a/docs/database-devops/use-database-devops/rollback-for-database-schemas.md b/docs/database-devops/use-database-devops/rollback-for-database-schemas.md index 5311ff60f98..1ce05ce9ee8 100644 --- a/docs/database-devops/use-database-devops/rollback-for-database-schemas.md +++ b/docs/database-devops/use-database-devops/rollback-for-database-schemas.md @@ -7,11 +7,31 @@ sidebar_position: 4 This topic describes how Harness Database DevOps implements automated rollback to maintain the stability and integrity of your database schema workloads. -## Accelerate Innovation Velocity +## What are tags? -Harness DB DevOps is a powerful tool that helps accelerate innovation velocity, particularly in the context of managing and evolving database schemas. By using DB DevOps, development teams can introduce new features, improve existing ones, and deploy updates more rapidly and safely. +A tag is a marker or label assigned to a specific point in a database's migration history. Harness recommends creating a tag every time you deploy a changeset to a database so that you always have a rollback point for future changes. + +### Rollback A Database Schema + +Rolling back a database schema reverts the changes made to your database to a specified tag that you created. -By integrating DB DevOps into your development workflow, teams can accelerate their innovation velocity by automating and streamlining database schema changes. DB DevOps reduces the friction associated with database updates, allowing teams to focus on delivering new features and improvements faster and with greater confidence. 
This, in turn, enhances the overall agility of the development process, enabling organizations to stay competitive and responsive to market demands. +Here is how you can roll back a database within Harness Database DevOps: + + 1. In Harness, go to the **Database DevOps** module and select your **Project**. + 2. Determine the tag you want to roll back to. This tag represents a specific state of the database schema that you want to revert to after applying changes. + 3. Select the **Pipelines** tab on the side menu, and open the pipeline in your Harness Database DevOps interface where the rollback will be executed. + 4. In the configuration for the rollback step, you will need to provide the following details: + - **Schema Name**: Specify the name of the database schema that you want to roll back. + - **Instance Name**: Indicate the database instance where the rollback will take place. This is typically defined by a JDBC URL, user, and password. + - **Rollback Tag**: Enter the name of the tag to which you want to roll back the schema. + 5. Run the pipeline with the configured rollback step. + 6. Keep an eye on the execution logs to ensure that the rollback is successful. The logs will provide information about the actions taken + during the rollback. + 7. After the rollback is complete, verify that the database schema has been reverted to the desired state. This may involve checking the schema structure and ensuring that any changes made after the specified tag have been undone. + +:::info +You can refer to the Harness documentation detailing how to [Add a Liquibase command step](/docs/database-devops/use-database-devops/add-liquibase-command-step.md). +::: ## Built in failure strategies including rollback @@ -49,7 +69,8 @@ In some cases, you might need to roll back changes that were successfully applie For these scenarios, Harness DB DevOps provides a 'Rollback Schema' pipeline step. This step can be configured in your pipeline to roll back to a particular tag, allowing you to revert your database schema to a known good state. -### What are tags? +## Accelerate Innovation Velocity -A tag is a marker or label assigned to a specific point in a database's migration history. Harness recommends creating a change every time you deploy a changeset to a database so that you always have a rollback point for future changes. +Harness DB DevOps is a powerful tool that helps accelerate innovation velocity, particularly in the context of managing and evolving database schemas. By using DB DevOps, development teams can introduce new features, improve existing ones, and deploy updates more rapidly and safely. +By integrating DB DevOps into your development workflow, teams can accelerate their innovation velocity by automating and streamlining database schema changes. DB DevOps reduces the friction associated with database updates, allowing teams to focus on delivering new features and improvements faster and with greater confidence. This, in turn, enhances the overall agility of the development process, enabling organizations to stay competitive and responsive to market demands. 
\ No newline at end of file diff --git a/docs/infra-as-code-management/iacm-features/default-pipelines.md b/docs/infra-as-code-management/iacm-features/default-pipelines.md index fd4cced9b0b..c32fdae9990 100644 --- a/docs/infra-as-code-management/iacm-features/default-pipelines.md +++ b/docs/infra-as-code-management/iacm-features/default-pipelines.md @@ -7,7 +7,7 @@ sidebar_position: 30 import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -IaCM Default Pipelines simplify the process of provisioning, destroying, and managing infrastructure workspaces by providing quick access to your pre-configured pipelines. These pipelines can be created in the normal fashion, assigned as defaults at the project level and triggered within any of that project's workspaces. This provides a much more streamlined experience and helps to ensure consistency across all of your workspaces. +IaCM Default Pipelines offer a streamlined approach to provisioning, destroying, and managing infrastructure workspaces by allowing you to run your pre-configured pipelines directly from any workspace. These pipelines can be easily created and set as defaults at the project level, providing quick and consistent access across all workspaces. This functionality ensures a seamless experience and maintains uniformity throughout your infrastructure management. In addition, as some workspaces may require specialized pipelines, you can overwrite the defaults at the workspace level. This will not disrupt any other workspace using the project level defaults. diff --git a/docs/infra-as-code-management/iacm-features/module-registry.md b/docs/infra-as-code-management/iacm-features/module-registry.md index a64e1681978..911c5b1123a 100644 --- a/docs/infra-as-code-management/iacm-features/module-registry.md +++ b/docs/infra-as-code-management/iacm-features/module-registry.md @@ -16,12 +16,16 @@ sidebar_label: Module registry import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; + +:::tip pending release +IaCM Module Registry is currently pending release and will be available soon! +::: -With Harness module registry, you can speed up and simplify your infrastructure management by reusing pre-built modules. This helps you avoid reinventing the wheel every time you need to set up common components, such as virtual machines, databases, or networks. +The Harness IaCM Module Registry is a centralized repository where you can publish and manage versions of pre-built infrastructure modules. These modules, which include components like virtual machines, databases, and networks, can be reused by different teams, streamlining infrastructure management and ensuring consistency across projects. ### Prerequisites Before you begin configuring module registry, ensure that you have: -- Access to your OpenTofu or Terraform environment via [Harness connectors](https://developer.harness.io/docs/infra-as-code-management/get-started/onboarding-guide#add-connectors). +- Access to your OpenTofu or Legacy Terraform environment via [Harness connectors](https://developer.harness.io/docs/infra-as-code-management/get-started/onboarding-guide#add-connectors). :::tip delegate version If you are using Harness to connect to your code repository, you can continue without further action. 
diff --git a/docs/infra-as-code-management/pipelines/iacm-plugins/terraform-plugins.md b/docs/infra-as-code-management/pipelines/iacm-plugins/terraform-plugins.md index a6b9bb56845..44ed1058276 100644 --- a/docs/infra-as-code-management/pipelines/iacm-plugins/terraform-plugins.md +++ b/docs/infra-as-code-management/pipelines/iacm-plugins/terraform-plugins.md @@ -61,7 +61,7 @@ The `plan-refresh-only` command focuses on updating the state file to mirror rea - **State Refresh**: Updates the state file with the current infrastructure status. :::tip use case -In scenarios where drift is detected, but there are unreviewed changes pending in your configuration code, the `plan-refresh-only` step is ideal. It refreshes the state to reconcile drift without applying any pending code updates, ensuring only the manual changes are addressed. Go to [Drift Detection](https://developer.harness.io/docs/infra-as-code-management/pipelines/operations/drift-detection/#handle-drift) to see a specific example. +In scenarios where drift is detected, but there are unreviewed changes pending in your configuration code, the `plan-refresh-only` step is ideal. It refreshes the state to reconcile drift without applying any pending code updates, ensuring only the manual changes are addressed. Go to [Drift Detection](https://developer.harness.io/docs/infra-as-code-management/use-iacm/drift-detection/#handle-drift) to see a specific example. ::: ### Apply - Refresh Only diff --git a/docs/infra-as-code-management/pipelines/operations/drift-detection.md b/docs/infra-as-code-management/use-iacm/drift-detection.md similarity index 90% rename from docs/infra-as-code-management/pipelines/operations/drift-detection.md rename to docs/infra-as-code-management/use-iacm/drift-detection.md index e3126f57c44..54d65e6409f 100644 --- a/docs/infra-as-code-management/pipelines/operations/drift-detection.md +++ b/docs/infra-as-code-management/use-iacm/drift-detection.md @@ -14,7 +14,7 @@ To detect drift, follow these steps: 2. Choose a Workspace or set it as a runtime input 3. Select "Detect Drift" when prompted to choose operation -![Resources](./static/drift-pipeline.png) +![Resources](static/drift-pipeline.png) 4. To schedule drift detection regularly, define a [cron trigger for the pipeline](https://developer.harness.io/docs/platform/triggers/schedule-pipelines-using-cron-triggers/) @@ -22,21 +22,21 @@ When executed, the pipeline will fail if drift is detected, and you will be able **In the pipeline** If you go to the "Resources" tab, the "Drift Changes" section will outline all the resources where the drift was detected. Clicking on each resource will highlight which attribute has drifted -![Resources](./static/drift-pipeline-detected.png) +![Resources](static/drift-pipeline-detected.png) **In the Workspace** When drift is detected, the resources will go into a "Drifted" mode Under the "Resources" tab, you will be able to see which resources are in drift -![Resources](./static/Workspace-drift.png) +![Resources](static/Workspace-drift.png) Clicking on each resource will highlight which attribute drifted -![Resources](./static/Workspace-drift-attributes.png) +![Resources](static/Workspace-drift-attributes.png) ## Drift detection during resource provisioning Harness IaCM can also detect drift during a provisioning operation. 
If during execution, IaCM identifies drift, the drift information will be populated in the Approval step and "Resources" Tab -![Resources](./static/provision-drift.png) +![Resources](static/provision-drift.png) ## Handle drift To promote best practices, use your IaC repository as the central source of truth for your infrastructure. When drift occurs due to external actions, such as manual changes in your cloud provider console, you can run a provision pipeline to realign your infrastructure with the state defined in your code. @@ -48,4 +48,4 @@ In scenarios where drift is detected, but there are unreviewed changes pending i **Example workflow:** The following pipeline demonstrates how to handle drift using plan-refresh-only: -![plan-refresh-only](./static/plan-refresh-only.png) \ No newline at end of file +![plan-refresh-only](static/plan-refresh-only.png) \ No newline at end of file diff --git a/docs/infra-as-code-management/pipelines/operations/static/Workspace-drift-attributes.png b/docs/infra-as-code-management/use-iacm/static/Workspace-drift-attributes.png similarity index 100% rename from docs/infra-as-code-management/pipelines/operations/static/Workspace-drift-attributes.png rename to docs/infra-as-code-management/use-iacm/static/Workspace-drift-attributes.png diff --git a/docs/infra-as-code-management/pipelines/operations/static/Workspace-drift.png b/docs/infra-as-code-management/use-iacm/static/Workspace-drift.png similarity index 100% rename from docs/infra-as-code-management/pipelines/operations/static/Workspace-drift.png rename to docs/infra-as-code-management/use-iacm/static/Workspace-drift.png diff --git a/docs/infra-as-code-management/pipelines/operations/static/drift-pipeline-detected.png b/docs/infra-as-code-management/use-iacm/static/drift-pipeline-detected.png similarity index 100% rename from docs/infra-as-code-management/pipelines/operations/static/drift-pipeline-detected.png rename to docs/infra-as-code-management/use-iacm/static/drift-pipeline-detected.png diff --git a/docs/infra-as-code-management/pipelines/operations/static/drift-pipeline.png b/docs/infra-as-code-management/use-iacm/static/drift-pipeline.png similarity index 100% rename from docs/infra-as-code-management/pipelines/operations/static/drift-pipeline.png rename to docs/infra-as-code-management/use-iacm/static/drift-pipeline.png diff --git a/docs/infra-as-code-management/pipelines/operations/static/plan-refresh-only.png b/docs/infra-as-code-management/use-iacm/static/plan-refresh-only.png similarity index 100% rename from docs/infra-as-code-management/pipelines/operations/static/plan-refresh-only.png rename to docs/infra-as-code-management/use-iacm/static/plan-refresh-only.png diff --git a/docs/infra-as-code-management/pipelines/operations/static/provision-drift.png b/docs/infra-as-code-management/use-iacm/static/provision-drift.png similarity index 100% rename from docs/infra-as-code-management/pipelines/operations/static/provision-drift.png rename to docs/infra-as-code-management/use-iacm/static/provision-drift.png diff --git a/docs/internal-developer-portal/flows/custom-actions.md b/docs/internal-developer-portal/flows/custom-actions.md index 632c5649ebb..621f9f94262 100644 --- a/docs/internal-developer-portal/flows/custom-actions.md +++ b/docs/internal-developer-portal/flows/custom-actions.md @@ -6,7 +6,7 @@ sidebar_position: 4 ## Introduction -The Workflows come with several built-in actions for fetching content, registering in the catalog and of course actions for creating and publishing a git 
repository. +**Workflow Actions** in IDP are integration points with third-party tools, used to take inputs from the Workflows frontend and execute specific tasks based on the user's input. The Workflows come with several built-in actions for fetching content, registering in the catalog and of course actions for creating and publishing a git repository. There are several repository providers supported out of the box such as **GitHub**, **Azure**, **GitLab** and **Bitbucket**. @@ -34,7 +34,9 @@ This action currently supports [IDP Stage](https://developer.harness.io/docs/int ::: -This Worfklow action requires **variables of type pipeline, stage or step** as input along with the **pipeline url**(for pipelines using [Git Experience](https://developer.harness.io/docs/platform/git-experience/git-experience-overview) make sure your URL includes `branch` and `repoName` e.g., `https://app.harness.io/ng/account/accountID/module/idp/orgs/orgID/projects/projectID/pipelines/pipelineID?repoName=repo-name&branch=branch`), and then trigger the pipeline based in the `inputset` obtained from the user. +This Workflow action requires **variables of type pipeline, stage or step** as input along with the **pipeline url**, and then triggers the pipeline based on the `inputset` obtained from the user. + +> Note: For pipelines using [Git Experience](https://developer.harness.io/docs/platform/git-experience/git-experience-overview) make sure your URL includes `branch` and `repoName` e.g., `https://app.harness.io/ng/account/accountID/module/idp/orgs/orgID/projects/projectID/pipelines/pipelineID?repoName=repo-name&branch=branch` ```YAML ... @@ -48,7 +50,7 @@ steps: inputset: project_name: ${{ parameters.project_name }} github_repo: ${{ parameters.github_repo }} - cloud_provider: ${{ parameters.provider }} + pipeline.variables.cloud_provider: ${{ parameters.provider }} db: ${{ parameters.db }} cache: ${{ parameters.cache }} apikey: ${{ parameters.token }} @@ -93,11 +95,17 @@ inputset: pipeline.variables.project_name: ${{ parameters.project_name }} pipeline.stages.originalStageID.variables.github_repo: ${{ parameters.github_repo }} ``` -To obtain these references, simply copy the variable path from the Harness Pipeline Studio UI. +To obtain these references, simply copy the variable path from the Harness Pipeline Studio UI and remove the special characters `<+` and `>`. + +> Note: **This is the only way to reference stage variables, and variables in pipelines that use templates, under `inputset`. Without the fully qualified path, the input will not be valid.** ![](./static/stage-variable.png) -> Note: **This is the only way to reference stage and step variables under `inputset`, without the fully qualified path, the input isn't valid.** +| Variables under `inputset` | What's Supported | +|----------------------------------------------------------------------------|---------------------------------------------------------------------------------| +| Variable name (`variable_name`) | Supported with Pipeline Variables for the IDP stage, custom stage, and Codebase Disabled build stage, for Pipelines **not** containing any templates. | +| Variable name with Fully Qualified Path (`pipeline.variables.variable_name`) | Supported with Pipeline Variables for all supported stages and for Pipelines containing any templates. 
| + :::info diff --git a/docs/internal-developer-portal/flows/flows-output.md b/docs/internal-developer-portal/flows/flows-output.md index 168dd079d40..03ca004ac1d 100644 --- a/docs/internal-developer-portal/flows/flows-output.md +++ b/docs/internal-developer-portal/flows/flows-output.md @@ -88,6 +88,39 @@ Let's take a look at the inputs that the Workflow expects from a developer. The The `spec.steps` field contains only one action, and that is to trigger a Harness pipeline. It takes the pipeline `url`,`inputset` containing all the runtime input variables that the pipeline needs and the `apikey` as input. +:::info +The syntax `${{ parameters.x }}` is supported exclusively within the `steps` section when configuring the Workflows Backend. It cannot be used within the `properties` section to reference another parameter. + +```YAML {16,24-25} +## Example workflows.yaml +... +spec: + parameters: + - title: Service Details + properties: + projectId: + title: Project Identifier + description: Harness Project Identifier + type: string + ui:field: HarnessProjectPicker + template_type: + title: Type of the Template + type: string + description: Type of the Template + ui:readonly: $${{ parameters.another_field}} ## NOT SUPPORTED + steps: + - id: trigger + name: Creating your react app + action: trigger:harness-custom-pipeline + input: + url: "https://app.harness.io/ng/account/account_id/module/idp/orgs/org_id/projects/project_id/pipelines/pipeline_id/pipeline-studio/?storeType=INLINE" + inputset: + project_id: ${{ parameters.projectId }} ## SUPPORTED + template_type: ${{ parameters.template_type }} ## SUPPORTED +... +``` +::: + [Steps](/docs/internal-developer-portal/flows/service-onboarding-pipelines#building-the-workflow-backend) is where you integrate the Harness Pipeline as a Backend and are the core execution units within Workflows. Each step runs an action that might involve triggering a CI/CD pipeline, creating a service in a catalog, or provisioning infrastructure resources. ### Manage variables in the pipeline diff --git a/docs/internal-developer-portal/flows/idp-stage.md b/docs/internal-developer-portal/flows/idp-stage.md index 36b68bd0d54..95439c651b3 100644 --- a/docs/internal-developer-portal/flows/idp-stage.md +++ b/docs/internal-developer-portal/flows/idp-stage.md @@ -11,15 +11,26 @@ import TabItem from '@theme/TabItem'; ## Introduction - The self-service flow in IDP is powered by the Harness Pipelines. A stage is a part of a pipeline that contains the logic to perform a major segment of a larger workflow defined in a pipeline. Stages are often based on the different workflow milestones, such as building, approving, and delivering. The process of adding a stage to a pipeline is the same for all Harness modules. When you add a stage to a pipeline, you select the stage type, such as **Developer Portal** for IDP or **Build** for CI or **Deploy** for CD. The available stage settings are determined by the stage type, and, if applicable, the module associated with the selected stage type. This functionality is limited to the modules and settings that you have access to. -## How to Add the Developer Portal Stage +:::info + +#### Limitations + +- The "Clone Codebase (Git Clone)" action is not supported at the stage level for the IDP stage. + + +- [Looping strategies](https://developer.harness.io/docs/platform/pipelines/looping-strategies/looping-strategies-matrix-repeat-and-parallelism) (Parallelism, Matrix, Repeat) are not supported for the IDP stage. 
+ +::: + + +## How to Add the Developer Portal Stage diff --git a/docs/internal-developer-portal/flows/service-onboarding-pipelines.md b/docs/internal-developer-portal/flows/service-onboarding-pipelines.md index b2a9bcc71ae..62b149f1254 100644 --- a/docs/internal-developer-portal/flows/service-onboarding-pipelines.md +++ b/docs/internal-developer-portal/flows/service-onboarding-pipelines.md @@ -11,9 +11,9 @@ redirect_from: IDP Self Service Workflows enable Platform Engineers, Infrastructure teams and DevOps Engineers to automate New Service Onboarding, New Infrastructure Onboarding and Day 2 Operations for Developers within your organization. As a Platform Engineer, you can create a Workflow that prompts developers for necessary details and then executes a Harness Pipeline to orchestrate what is needed. This can be generating a new code repository with the initial code, CI/CD pipelines and necessary infrastructure. -The Workflow is defined using a YAML file usually called `workflow.yaml`. The [syntax](https://backstage.io/docs/features/software-templates/input-examples) of the Workflow definition is managed by Backstage.io. IDP Workflows are also known as Backstage Software Template since they are usually used to standardize how a new Software is created in an organization. +The Workflow is defined using a YAML file usually called `workflow.yaml`. The [syntax](https://backstage.io/docs/features/software-templates/writing-templates) of the Workflow definition is managed by Backstage.io. IDP Workflows are also known as Backstage Software Template since they are usually used to standardize how a new Software is created in an organization. -**Workflows** are also **Catalog** entities of **`kind: Template`**. +**Workflows** are also **catalog entities** of `kind: Template`. A Workflow consists of a frontend that collects input from users and a backend configured to perform specific tasks based on the user's input from the frontend and the [workflow actions](https://developer.harness.io/docs/internal-developer-portal/flows/custom-actions) used. ![](./static/workflows-mindmap.png) @@ -40,7 +40,7 @@ metadata: - javascript # these are the steps which are rendered in the frontend with the form input spec: - owner: debabrata.panigrahi@harness.io + owner: d.p@harness.io type: service parameters: - title: Service Details @@ -115,19 +115,11 @@ Read More on [Backstage Software Template](https://backstage.io/docs/features/so ## Syntax of Workflows YAML -Variables in the Workflow YAML are wrapped in `${{ }}`. These are used for linking different parts of the Workflows together. All the form inputs from the `parameters` section will be available by using this syntax. For example `${{ parameters.project_name }}` inserts the value of `project_name` from the parameters entered by the user in the UI. This is great for passing the values from the form into different steps and reusing these input variables. These strings preserve the type of the parameter. - -The `${{ parameters.project_name }}` pattern is used in the Workflows YAML to pass the parameters from the UI to the input of the `trigger:harness-custom-pipeline` step. - -As you can see above in the `Outputs` section, `actions` and `steps` can also generate outputs. You can grab that output using `steps.$stepId.output.$property`. - -It is important to remember that all examples are based on the [react-jsonschema-form](https://rjsf-team.github.io/react-jsonschema-form/) project. - Now let's dive in and pick apart what each of these sections do and what they are. 
![](./static/yaml-syntax-workflow.png) -## Creating the frontend of the Workflow +### Creating the frontend of the Workflow The UI of an IDP Workflow can be defined using a `workflows.yaml` file with metadata and `parameters` field. In a Workflow, the **input parameters** are the first interaction point for your users. It defines the structure and types of data needed to initiate the pipeline. @@ -150,7 +142,7 @@ The UI of an IDP Workflow can be defined using a `workflows.yaml` file with meta 3. **Required Fields**: Workflows allow developers to enforce required fields. For example, the field `age` or `owner` could be marked as mandatory, ensuring critical data is not skipped during onboarding. -### Where to add the Workflow forms +#### Where to add the Workflow forms The input fields in `parameters` can be modified in a sequence. Note that it accepts an array which means either there can be one page of input fields or it can be broken up into multiple different pages which would be rendered as different steps in the form. @@ -212,7 +204,7 @@ spec: -### Using the Workflows Playground +#### Using the Workflows Playground :::caution @@ -228,13 +220,15 @@ To access the playground, go to your Workflows page and click on **Open Playgrou ![](./static/template-editoer-2.png) ![](./static/template-editor-3.png) -## Building the Workflow Backend +### Building the Workflow Backend -### Where to Add the Backend Integration: Action Customization +#### Where to Add the Backend Integration: Action Customization **Steps** are the core execution units in a Workflow. This is typically used to trigger a Harness pipeline which orchestrates repo creation, service onboarding in the catalog or provisioning infrastructure resources. The inputs gathered from the user are passed into the action, and the outputs are generated based on the results of each action. -The most common actions used in a Workflow are - +**Workflow Actions** in IDP are integration points with third-party tools, used to take inputs from the Workflows frontend and execute specific tasks based on the user's input. + +These are some example actions used in a Workflow: - **Triggering Pipelines**: Using [`trigger:harness-custom-pipeline`](https://developer.harness.io/docs/internal-developer-portal/flows/custom-actions#1-triggerharness-custom-pipeline) to trigger pipelines in Harness for various actions like creating repository, [onboarding new service](https://developer.harness.io/docs/internal-developer-portal/flows/create-a-new-service-using-idp-stage), etc. - **Creating Repositories**: Using [`trigger:harness-custom-pipeline`](https://developer.harness.io/docs/internal-developer-portal/flows/custom-actions#1-triggerharness-custom-pipeline) and execute a pipeline with [create-repo stage](https://developer.harness.io/docs/internal-developer-portal/flows/idp-stage#3-create-repo) to generate a new repository based on the provided input. @@ -261,6 +255,48 @@ These follow the same standard format: url: ${{ steps.trigger.output.PipelineUrl }} ``` +Variables in the Workflow YAML are wrapped in `${{ }}`. These are used for linking different parts of the Workflows together. All the form inputs from the `parameters` section will be available by using this syntax. For example `${{ parameters.project_name }}` inserts the value of `project_name` from the parameters entered by the user in the UI. This is great for passing the values from the form into different steps and reusing these input variables. These strings preserve the type of the parameter. 
+ +The `${{ parameters.project_name }}` pattern is used in the Workflows YAML to pass the parameters from the UI to the input of the `trigger:harness-custom-pipeline` step. + +:::info +The syntax `${{ parameters.x }}` is supported exclusively within the `steps` section when configuring the [Workflows Backend](#building-the-workflow-backend). It cannot be used within the `properties` section to reference another parameter. + +```YAML {16,24-25} +## Example workflows.yaml +... +spec: + parameters: + - title: Service Details + properties: + projectId: + title: Project Identifier + description: Harness Project Identifier + type: string + ui:field: HarnessProjectPicker + template_type: + title: Type of the Template + type: string + description: Type of the Template + ui:readonly: $${{ parameters.another_field}} ## NOT SUPPORTED + steps: + - id: trigger + name: Creating your react app + action: trigger:harness-custom-pipeline + input: + url: "https://app.harness.io/ng/account/account_id/module/idp/orgs/org_id/projects/project_id/pipelines/pipeline_id/pipeline-studio/?storeType=INLINE" + inputset: + project_id: ${{ parameters.projectId }} ## SUPPORTED + template_type: ${{ parameters.template_type }} ## SUPPORTED +... +``` + +::: + +As you can see above in the `Outputs` section, `actions` and `steps` can also generate outputs. You can grab that output using `steps.$stepId.output.$property`. + +It is important to remember that all examples are based on the [react-jsonschema-form](https://rjsf-team.github.io/react-jsonschema-form/) project. + [Read More](/docs/internal-developer-portal/flows/custom-actions) about the Available Workflow Actions. ### Using Harness Pipelines as the Orchestrator diff --git a/docs/internal-developer-portal/flows/static/git-clone-not.png b/docs/internal-developer-portal/flows/static/git-clone-not.png new file mode 100644 index 00000000000..26e4f533911 Binary files /dev/null and b/docs/internal-developer-portal/flows/static/git-clone-not.png differ diff --git a/docs/internal-developer-portal/plugins/available-plugins/jenkins.md b/docs/internal-developer-portal/plugins/available-plugins/jenkins.md index 34a2c3d4210..9637414c27c 100644 --- a/docs/internal-developer-portal/plugins/available-plugins/jenkins.md +++ b/docs/internal-developer-portal/plugins/available-plugins/jenkins.md @@ -68,7 +68,7 @@ To configure the plugin for a service in the software catalog, set the following ```yaml metadata: annotations: - jenkins.io/github-folder: "folder-name/project-name" + jenkins.io/github-folder: "folder-name/project-name" ``` ## Support diff --git a/docs/internal-developer-portal/plugins/custom-plugins/add-a-custom-plugin.md b/docs/internal-developer-portal/plugins/custom-plugins/add-a-custom-plugin.md index 7860f62e8c2..76b0867f22d 100644 --- a/docs/internal-developer-portal/plugins/custom-plugins/add-a-custom-plugin.md +++ b/docs/internal-developer-portal/plugins/custom-plugins/add-a-custom-plugin.md @@ -237,7 +237,7 @@ When a request is made to `https://idp.harness.io/{ACCOUNT_ID}/idp/api/proxy/git :::info -Once you enable the PLugin it will approximately take around 30 minutes for the plugin to be enabled as we rebuild the IDP image with your plugin. +Once you enable the plugin, it takes approximately 30 minutes for it to become available, as we rebuild the IDP image with your plugin. 
::: diff --git a/docs/internal-developer-portal/scorecards/scorecard.md b/docs/internal-developer-portal/scorecards/scorecard.md index 78698c071ee..544ad1a676a 100644 --- a/docs/internal-developer-portal/scorecards/scorecard.md +++ b/docs/internal-developer-portal/scorecards/scorecard.md @@ -41,7 +41,7 @@ If you have Scorecard UI components visible on your Catalog component pages, you ### Scorecard Components in IDP -Scorecard has two main UI components which are developer facing and lives in the Catalog - 1. A small **Card** for the Overview page with the scores and, 2. a **Tab** view with details of the checks and how the score got computed. This is illustrated below. The Tab view contains detailed comprehensive information as shown in the image under [overview](/docs/internal-developer-portal/scorecards/scorecard#overview) +Scorecard has two main UI components which are developer facing and live in the Catalog - 1. A small **Card** for the Overview page with the scores and, 2. A **Tab** view with details of the checks and how the score got computed. This is illustrated below. The Tab view contains detailed comprehensive information as shown in the image under [overview](/docs/internal-developer-portal/scorecards/scorecard#overview) @@ -92,6 +92,10 @@ Make sure the values under **Filter catalog entities for which the scorecard is ## Scorecard Overview Page +:::info +The overview page is generated by jobs that run once every 24 hours. For newly created scorecards, you will need to wait until the next iteration at 12:00 AM UTC for the overview page to appear. +::: + - Once you have created your Scorecard, in the Overview page you can view the number of components for which it is applied and the overall health of the components in terms of "%" diff --git a/docs/internal-developer-portal/techdocs/enable-docs.md b/docs/internal-developer-portal/techdocs/enable-docs.md index 8c2673d840e..43b8dfad988 100644 --- a/docs/internal-developer-portal/techdocs/enable-docs.md +++ b/docs/internal-developer-portal/techdocs/enable-docs.md @@ -20,13 +20,22 @@ Docs in Harness IDP is powered by [TechDocs Backstage Plugin](https://backstage. ::: - - + + +### Docs Available in the Root of Source Folder \{#docs-available-in-the-root-of-source-folder} To add documentation: -1. Create a `docs` directory next to where you have `catalog-info.yaml`. +1. Create a `docs` directory next to where you have `catalog-info.yaml`. The directory tree would look something like this: + +```sh +├── catalog-info.yaml +├── mkdocs.yml +└── docs + └── index.md +``` + 2. Inside the `docs` directory, create a `index.md` file with the following contents. ``` @@ -78,10 +87,25 @@ topics outlined in this example table: TechDocs uses MkDocs as the static site generator. Visit https://www.mkdocs.org for more information about MkDocs. ``` +3. Now add an `mkdocs.yml` next to where you have the `catalog-info.yaml`, so that the `mkdocs.yml` file is a sibling of `catalog-info.yaml`. -3. Edit the `catalog-info.yaml` and add the TechDocs annotation. +Here's the content for `mkdocs.yml`: -4. In the `metadata.annotations` field, add `backstage.io/techdocs-ref: dir:.`. +```YAML +site_name: 'Example Documentation' +repo_url: https://github.com/your_org/your_repo +edit_uri: url to your index.md + +nav: + - Home: index.md + +plugins: + - techdocs-core +``` + +4. Edit the `catalog-info.yaml` and add the TechDocs annotation. + +5. In the `metadata.annotations` field, add `backstage.io/techdocs-ref: dir:.`. 
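+
+For reference, a minimal sketch of a `catalog-info.yaml` carrying this annotation might look like the following (the component name and owner are illustrative placeholders, not prescribed values):
+
+```YAML
+apiVersion: backstage.io/v1alpha1
+kind: Component
+metadata:
+  name: example-service # hypothetical component name
+  annotations:
+    backstage.io/techdocs-ref: dir:. # TechDocs sources sit next to this file
+spec:
+  type: service
+  owner: team-example # hypothetical owner
+```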
![](static/techdocs-ref.png) @@ -117,9 +141,24 @@ If, for example, you wanted to keep a lean root directory, you could place your ::: - + + +### Docs Available in Some Other Location \{#docs-available-in-some-other-location} + +In situations where your TechDocs source content is managed and stored in a location completely separate from your `catalog-info.yaml`, you can instead specify a URL location reference, the exact value of which will vary based on the source code hosting provider. Notice that instead of the `dir:` prefix, the `url:` prefix is used instead. Make sure the specified path contains the `mkdocs.yml` file. For example: + +- Harness Code Repository: + - Repository at account scope: `url:https://app.harness.io/ng/account/account_id/module/code/repos/repo_name` + - Repository at organization scope: `url:https://app.harness.io/ng/account/account_id/module/code/orgs/org_id/repos/repo_name` + - Repository at project scope: `url:https://app.harness.io/ng/account/account_id/module/code/orgs/org_id/projects/project_id/repos/repo_name` -In situations where your TechDocs source content is managed and stored in a location completely separate from your `catalog-info.yaml`, you can instead specify a URL location reference, the exact value of which will vary based on the source code hosting provider. Notice that instead of the `dir:` prefix, the `url:` prefix is used instead. For example: +:::info + +If your account uses a vanity hostname in the URL, such as `company_name.harness.io`, make sure you update it to the hostname used for the HCR integration (e.g., `app.harness.io`, `app3.harness.io`, etc.) when adding the URL to the annotation. To verify the correct hostname, navigate to **Admin** -> **Git Integrations**. + +![](./static/check-host-name.png) + +::: - GitHub: `url:https://githubhost.com/org/repo/tree/` - GitLab: `url:https://gitlabhost.com/org/repo/tree/` diff --git a/docs/internal-developer-portal/techdocs/static/check-host-name.png b/docs/internal-developer-portal/techdocs/static/check-host-name.png new file mode 100644 index 00000000000..da840127862 Binary files /dev/null and b/docs/internal-developer-portal/techdocs/static/check-host-name.png differ diff --git a/docs/internal-developer-portal/techdocs/techdocs-bestpractices.md b/docs/internal-developer-portal/techdocs/techdocs-bestpractices.md index ae3803480b5..89eb5d8c753 100644 --- a/docs/internal-developer-portal/techdocs/techdocs-bestpractices.md +++ b/docs/internal-developer-portal/techdocs/techdocs-bestpractices.md @@ -52,7 +52,7 @@ MkDocs can automatically generate navigation based on folder and file structures :::info -Even with auto-generated navigation, you can include an `mkdocs.yml` file. In this case, you **cannot** define a `nav` section for custom navigation, but you can still specify other configurations like `site_name` and plugins. +Even with auto-generated navigation, you can include a `mkdocs.yml` file. In this case, you **cannot** define a `nav` section for custom navigation, but you can still specify other configurations like `site_name` and plugins. ::: diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/env-variables.md b/docs/open-source/gitspaces/develop-in-gitspaces/env-variables.md new file mode 100644 index 00000000000..fa988d4e4e1 --- /dev/null +++ b/docs/open-source/gitspaces/develop-in-gitspaces/env-variables.md @@ -0,0 +1,50 @@ +--- +title: Environment Variables +description: Learn how to define environment variables for your development. 
sidebar_position: 3 +sidebar_label: Environment Variables +--- + +This guide walks you through the steps required to set environment variables for your containers without altering the container images. This feature allows you to manage application configurations and customize development environments to suit your specific requirements. + +You can define environment variables in your container using the ```containerEnv``` property in the ```devcontainer.json``` specification. (Read more about the [devcontainer.json configuration](https://containers.dev/implementors/json_reference).) + +### What is "containerEnv"? +```containerEnv``` is a set of name-value pairs where each pair defines an environment variable and its corresponding value. This property sets these variables directly on the Docker container itself, meaning their scope is limited to all processes running inside the container. + +### Adding containerEnv to devcontainer.json +Here’s how you can include the ```containerEnv``` property in your ```devcontainer.json``` configuration: +``` +"containerEnv": { + "MY_CONTAINER_VAR": "some-value-here" +} +``` +This property is static for the container’s lifecycle. + +Here’s an example of a complete ```devcontainer.json``` file with the containerEnv property: +``` +{ + "image": "mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye", + "containerEnv": { + "VAR1": "value1", + "VAR2": "value2" + } +} +``` + +### Verifying Environment Variables +Once you’ve added the ```containerEnv``` property, you can verify the environment variable setup by checking the container logs while creating a Gitspace through the Harness UI. + +![](./static/env-var.png) + +### Using Environment Variables +You can start using your environment variables directly within your Gitspaces. To verify, open your Gitspace and launch the Terminal. + +Run the following command: + +```env``` + +This will display the environment variables you’ve configured, as shown in the output below. + +*(Screenshot: output of the `env` command listing the configured variables.)* + diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/extensions.md b/docs/open-source/gitspaces/develop-in-gitspaces/extensions.md new file mode 100644 index 00000000000..95cb5316fa2 --- /dev/null +++ b/docs/open-source/gitspaces/develop-in-gitspaces/extensions.md @@ -0,0 +1,38 @@ +--- +title: Pre-Installed Extensions in Gitspace +description: Learn more about how you can set up pre-installed extensions for your Gitspaces with "devcontainer.json". +sidebar_position: 6 +sidebar_label: Pre-installed Extensions +--- + +This guide explains how to configure your Gitspace to automatically install extensions during setup. Let’s dive into the details. + +## What is "extensions"? +```extensions``` refers to a **Visual Studio Code-specific** property used to automate the installation of extensions in your Gitspace. This property is an array of extension IDs that specifies which extensions should be installed when the Gitspace is created. ([Read more about the specification here.](https://containers.dev/supporting)) + +Since ```"extensions"``` is a Visual Studio Code-specific property, it must be added under the ```"vscode"``` section within the ```"customizations"``` property. 
+ +## Adding “extensions” to devcontainer.json +Here’s how to add the ```extensions``` property to your ```devcontainer.json``` configuration: +``` +"customizations": { + "vscode": { + "extensions": ["streetsidesoftware.code-spell-checker"] + } +} +``` +You can find the ```extension ID``` in the **“More info”** section of the official extension documentation. +![](./static/extensions-3.png) + +## Verifying the Setup +After adding this property, you can verify the setup by reviewing the **container logs** during the creation of a Gitspace via the Harness UI. + +![](./static/extensions-1.png) + + +## Behavior in VS Code Browser +Once your Gitspace is created and started in the VS Code Browser, the specified extensions (as defined in ```devcontainer.json```) will be pre-installed in your browser IDE within the Gitspace. + + +## Behavior in VS Code Desktop +When using VS Code Desktop, the specified extensions will not be pre-installed automatically. Instead, you’ll be prompted with a recommendation to install them upon starting your Gitspace. This is the default behavior for extensions in the VS Code Desktop environment. diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/forward-ports.md b/docs/open-source/gitspaces/develop-in-gitspaces/forward-ports.md index 60a0767aa73..19252a31a80 100644 --- a/docs/open-source/gitspaces/develop-in-gitspaces/forward-ports.md +++ b/docs/open-source/gitspaces/develop-in-gitspaces/forward-ports.md @@ -1,27 +1,77 @@ --- -title: Port Forwarding in Gitspaces +title: Port Forwarding +description: Learn how to forward ports in your Gitspaces. sidebar_position: 2 -description: Get started with forwarding ports within your Gitspaces. sidebar_label: Port Forwarding --- -You can forward ports in your Gitspaces to test and debug your application. +This guide provides a detailed walkthrough on how to configure Port Forwarding in Gitspaces, enabling you to run and test applications on your local machine. -Port forwarding gives you access to TCP ports running within your Gitspace. For example, if you're running a web application on a particular port in your Gitspace, you can forward that port. This allows you to access the application from the browser on your local machine for testing and debugging. +**Port Forwarding** in Gitspaces acts as a bridge between a remote development environment and your local machine. It allows developers to access services running inside Gitspaces as if they were running locally on their system. This feature is also helpful for debugging and testing directly in the browser without additional setup, saving time and effort. -When an application running inside a Gitspace prints output to the terminal that contains a localhost URL, such as `http://localhost:PORT` or `http://127.0.0.1:PORT`, the port is automatically forwarded. If you're using Gitspace in the browser or in Visual Studio Code, the URL string in the terminal is converted into a link that you can click to view the web page on your local machine. +## Forwarding a Port: VS Code Browser -## How to use port forwarding +### Pre-Requisites +To enable port forwarding: +- Ensure your application binds to the ```0.0.0.0``` host address. This allows the application to listen for incoming connections from all network interfaces. +- Most applications default to localhost. Change the host address to ```0.0.0.0``` to make port forwarding work. +- Ensure the correct port is configured in your application's configuration file. -1. First, you need to have a service you want to forward. 
If you don't have one yet but do have Node.js installed, you can run this command to start up a server on `port 3000`: -```sh -npx serve +### Adding “forwardPorts” to devcontainer.json +You can specify which ports to forward using the forwardPorts property in the ```devcontainer.json``` file. ([Read more about the specification here.](https://containers.dev/implementors/json_reference/)) + +Here's an example configuration: +``` +{ + "image":"mcr.microsoft.com/devcontainers/javascript-node:1-22-bookworm", + "postCreateCommand": "npm install", + "forwardPorts": [9000] +} ``` -2. Then, navigate to the **Ports** view in the Panel region (**Ports: Focus on Ports View**), and select **Forward a Port**. -3. If you are already **logged in** to your git provider, you'll be auto-forwarded to `port 3000`. +1. In your ```.devcontainer/devcontainer.json``` file in your source code repository, add the “forwardPorts” attribute to forward a port. You can specify any port number as required for port forwarding. +*(Screenshot: ```devcontainer.json``` with the “forwardPorts” attribute.)* + +2. Save the file and create your Gitspace with this configuration. (Ensure you have selected VS Code Browser as your IDE while creating the Gitspace) + +3. Once your Gitspace is active and running, click on “Open VS Code Online.” You’ll be redirected to VS Code Browser. (Copy this Gitspace URL) + +*(Screenshot: an active Gitspace with the “Open VS Code Online” button.)* + +4. To access the forwarded port URL, go to the **"Ports"** section in your IDE terminal. From there, you can easily open the forwarded port URL. + +![](./static/port-forward-latest.png) + +### Understanding Port URLs +A port URL includes: +- ```Gitspace ID```: Unique identifier for your Gitspace. +- ```Port No```: Forwarded port number is added to the port URL just before the region identifier. +- ```Region```: Hosting region of the Gitspace. +- ```Token```: A secure token generated for the session. + +A Gitspace URL exists in the following form: + +- https://[```Gitspace ID```].[```Region```].gitspace.harness.io/?token=[```Token```] + +Once the port number is added before the region, the port URL exists in the following form: + +- https://[```Gitspace ID```]-[```Port Number```].[```Region```].gitspace.harness.io/?token=[```Token```] + +The port URL is public, so you can share it with others to give them access to your application. + +## Forwarding a Port: VS Code Desktop +If you're using VS Code Desktop: +- Ensure your Gitspace is active and running with VS Code Desktop as the selected IDE. +- Open the Ports section in VS Code Desktop. +- Click **Forward a Port** and enter the port number (e.g., 9000). +*(Screenshot: the **Forward a Port** option in the VS Code Desktop Ports view.)* + +- Open https://localhost:9000 in your browser to access your application. +*(Screenshot: the forwarded application served on localhost:9000.)* + + + -4. Hovering over the **Forwarded Address**, you can use the inline actions copy the address, open it in your browser, or open an in-editor preview. diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/runArgs.md b/docs/open-source/gitspaces/develop-in-gitspaces/runArgs.md new file mode 100644 index 00000000000..7ef6af3758c --- /dev/null +++ b/docs/open-source/gitspaces/develop-in-gitspaces/runArgs.md @@ -0,0 +1,126 @@ +--- +title: Configuring Gitspace with runArgs +description: Get started with using the runArgs property to configure your Gitspace. 
+sidebar_position: 4
+sidebar_label: runArgs Configuration
+---
+
+This guide explains how to use the **```runArgs```** property in the ```devcontainer.json``` schema to configure your Gitspace.
+
+## What is "runArgs"?
+The **```runArgs```** property is used to specify Docker CLI arguments that should be passed to the ```docker run``` command when running the Gitspace.
+
+The ```docker run``` command is used for pulling the CDE image (as defined in the ```image``` property of ```devcontainer.json```), creating and starting the development container from that image, and executing commands within the container. The ```runArgs``` property (defined in ```devcontainer.json```) is an array of arguments for the ```docker run``` command, enabling developers to customize the behavior of the development container. ([Read more about the specification here.](https://containers.dev/implementors/json_reference/))
+
+## Adding runArgs to devcontainer.json
+To include the ```runArgs``` property in your ```devcontainer.json``` configuration, use the following format:
+```
+"runArgs": ["--argument", "value", ...]
+```
+### Examples
+#### Single Value Argument
+If the argument takes a single value, pass the argument and its value as one array element, joined by an `=`:
+```
+"runArgs": ["--blkio-weight=20"]
+```
+#### Multiple Values Argument
+For arguments with multiple values, use this format:
+```
+"runArgs": ["--env", "var1=abcd", "var2=defg", "var3=1234"]
+```
+#### No Value Argument
+For arguments with no values, use the following format:
+```
+"runArgs": ["--rm"]
+```
+The formats can be combined. In this example:
+```
+"runArgs": ["--restart=no", "--security-opt", "seccomp=unconfined"]
+```
+- `--restart` takes a single value (`no`), joined by an `=`.
+- `--security-opt` takes its value (`seccomp=unconfined`) as a separate array element.
+
+## Verifying the runArgs Setup
+After adding the ```runArgs``` property, you can verify the setup by reviewing the **container logs** during the creation of a Gitspace via the Harness UI.
+
+![](./static/runargs-1.png)
+![](./static/runargs-2.png)
+
+## Supported Docker CLI Arguments
+As of now, the ```runArgs``` property supports the following [Docker CLI arguments](https://docs.docker.com/reference/cli/docker/container/run/):
+| **Argument** |
+| :---------------- |
+| --add-host |
+| --annotation |
+| --blkio-weight |
+| --cap-drop |
+| --cgroup-parent |
+| --cgroupns |
+| --cpu-count |
+| --cpu-percent |
+| --cpu-period |
+| --cpu-quota |
+| --cpu-rt-period |
+| --cpu-rt-runtime |
+| -c, --cpu-shares |
+| --cpus |
+| --cpuset-cpus |
+| --cpuset-mems |
+| --dns |
+| --dns-option |
+| --dns-search |
+| --domainname |
+| --entrypoint |
+| -e, --env |
+| --health-cmd |
+| --health-interval |
+| --health-retries |
+| --health-start-interval |
+| --health-start-period |
+| --health-timeout |
+| -h, --hostname |
+| --init |
+| --io-maxbandwidth |
+| --io-maxiops |
+| --ipc |
+| --isolation |
+| --kernel-memory |
+| -l, --label |
+| --link |
+| --mac-address |
+| -m, --memory |
+| --memory-reservation |
+| --memory-swap |
+| --memory-swappiness |
+| --network |
+| --no-healthcheck |
+| --oom-kill-disable |
+| --oom-score-adj |
+| --pid |
+| --pids-limit |
+| --platform |
+| --pull |
+| --restart |
+| --rm |
+| --runtime |
+| --security-opt |
+| --shm-size |
+| --stop-signal |
+| --stop-timeout |
+| --storage-opt |
+| --sysctl |
+| --ulimit |
+| -u, --user |
+
+
+### Allowed and Blocked Values
+For some arguments, a predefined list of allowed and blocked values is enforced:
+- **Allowed Values**: Values explicitly supported for a specific argument.
+- **Blocked Values**: Values that are restricted for a specific argument.
+ +#### Current Restrictions +- **Network**: Blocked values are ```host``` and ```none```. +- **Label**: Labels with prefix ```gitspace.remote.user=``` are blocked. + + + diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/source-control.md b/docs/open-source/gitspaces/develop-in-gitspaces/source-control.md index 70adfd1ad86..f7f7706f52f 100644 --- a/docs/open-source/gitspaces/develop-in-gitspaces/source-control.md +++ b/docs/open-source/gitspaces/develop-in-gitspaces/source-control.md @@ -2,7 +2,6 @@ title: Using Source Control in your Gitspaces sidebar_position: 1 description: Understand how to use source control in your Gitspaces. -redirect_from: /docs/open-source/gitspaces/develop-in-gitspaces/pull-request sidebar_label: Source Control --- diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/static/env-var.png b/docs/open-source/gitspaces/develop-in-gitspaces/static/env-var.png new file mode 100644 index 00000000000..eaf63920f86 Binary files /dev/null and b/docs/open-source/gitspaces/develop-in-gitspaces/static/env-var.png differ diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/static/extensions-1.png b/docs/open-source/gitspaces/develop-in-gitspaces/static/extensions-1.png new file mode 100644 index 00000000000..3c55e7da503 Binary files /dev/null and b/docs/open-source/gitspaces/develop-in-gitspaces/static/extensions-1.png differ diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/static/extensions-3.png b/docs/open-source/gitspaces/develop-in-gitspaces/static/extensions-3.png new file mode 100644 index 00000000000..3adffe01d5c Binary files /dev/null and b/docs/open-source/gitspaces/develop-in-gitspaces/static/extensions-3.png differ diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/static/port-forward-latest.png b/docs/open-source/gitspaces/develop-in-gitspaces/static/port-forward-latest.png new file mode 100644 index 00000000000..62c40d00601 Binary files /dev/null and b/docs/open-source/gitspaces/develop-in-gitspaces/static/port-forward-latest.png differ diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/static/runargs-1.png b/docs/open-source/gitspaces/develop-in-gitspaces/static/runargs-1.png new file mode 100644 index 00000000000..6b639f19c0c Binary files /dev/null and b/docs/open-source/gitspaces/develop-in-gitspaces/static/runargs-1.png differ diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/static/runargs-2.png b/docs/open-source/gitspaces/develop-in-gitspaces/static/runargs-2.png new file mode 100644 index 00000000000..4dc587c3cd9 Binary files /dev/null and b/docs/open-source/gitspaces/develop-in-gitspaces/static/runargs-2.png differ diff --git a/docs/open-source/gitspaces/develop-in-gitspaces/user-config.md b/docs/open-source/gitspaces/develop-in-gitspaces/user-config.md new file mode 100644 index 00000000000..0942fb12fb9 --- /dev/null +++ b/docs/open-source/gitspaces/develop-in-gitspaces/user-config.md @@ -0,0 +1,68 @@ +--- +title: Configuring containerUser and remoteUser in Gitspace +description: Learn more about how you can configure "containerUser" and "remoteUser" in your Gitspace. +sidebar_position: 5 +sidebar_label: User Configuration +--- + +This guide explains how to set up ```containerUser``` and ```remoteUser``` in your Gitspace using the properties in the ```devcontainer.json``` schema. + +## What are "containerUser" and "remoteUser"? +These properties control the permissions of applications executed within containers, giving developers fine-grained control over container operations. 
([Read more about the specification here.](https://containers.dev/implementors/spec/))
+
+#### ```containerUser```
+- The user with which the **development container is started**.
+- It handles all processes and operations inside the container.
+- This concept is native to containers.
+- The default user is either the user specified in the **Docker image** or the **root user**.
+
+
+#### ```remoteUser```
+- The username used for **running processes inside the container**, including lifecycle scripts and any remote editor/IDE server processes.
+- This concept is not native to containers.
+- The default user is the same as the **containerUser**.
+
+## How to define a containerUser?
+You can define a containerUser in the following ways:
+1. Using the **containerUser** Property in ```devcontainer.json```:
+```
+"containerUser": "root"
+```
+2. Using the **user** Argument in the **runArgs** Property in ```devcontainer.json```:
+```
+"runArgs": ["--user=root"]
+```
+3. Defining **user** during the Image Build (Image Metadata).
+
+#### Priority Order:
+If multiple definitions exist, the containerUser is set based on the following priority:
+1. ```runArgs``` definition
+2. ```devcontainer.json``` specification
+3. Image Build
+
+## How to define a remoteUser?
+You can define a remoteUser in the following ways:
+1. Using the **remoteUser** Property in ```devcontainer.json```:
+```
+"remoteUser": "vscode"
+```
+2. Defining **remoteUser** in Image Metadata.
+
+#### Priority Order:
+If multiple definitions exist, the remoteUser is set based on the following priority:
+1. ```devcontainer.json``` specification
+2. Image metadata
+
+:::info
+**Static Password**: A static password is maintained for the remoteUser in case the user gets locked: **Harness@123**.
+:::
+
+:::info
+We assume that the user has a valid home directory: **/root** for the root user and **/home/username** for non-root users.
+:::
+
+## Verifying the Setup
+After adding this property, you can verify the setup by reviewing the **container logs** during the creation of a Gitspace via the Harness UI.
+
+![](./static/runargs-2.png)
+
diff --git a/docs/open-source/gitspaces/get-started/gitspace-configuration.md b/docs/open-source/gitspaces/get-started/gitspace-configuration.md
new file mode 100644
index 00000000000..575a260be0a
--- /dev/null
+++ b/docs/open-source/gitspaces/get-started/gitspace-configuration.md
@@ -0,0 +1,69 @@
+---
+title: Gitspace Configuration
+description: Learn more about the underlying configuration of a Gitspace.
+sidebar_position: 2
+sidebar_label: Gitspace Configuration
+---
+
+This guide will walk you through the Gitspace configuration in detail.
+
+Each time a new Gitspace is created, a dedicated Development Container is provisioned on a separate Virtual Machine (VM), providing an isolated and secure development environment.
+
+This Development Container allows you to work within a containerized version of a build environment. Simply put, it offers a pre-configured, ready-to-code setup directly within your IDE, allowing you to start coding instantly.
+
+*(Image: Gitspace configuration)*
+
+
+### Development Containers
+Development Containers (also known as [Dev Containers](https://containers.dev/implementors/spec/)) are an open source specification for developing consistent and feature-rich development environments.
+
+This specification equips containers with all the tools, libraries and runtimes required to enable seamless development inside them.
The development environment is configured based on metadata defined in this specification.
+Each Development Container is defined by a ```devcontainer.json``` file, which configures the containerized development environment.
+
+### devcontainer.json File
+Gitspace configuration lives with your source code in the ```.devcontainer/devcontainer.json``` file within your project’s repository. This file contains all necessary metadata and settings to define your development environment.
+
+The ```devcontainer.json``` specification includes various properties, allowing you to customize the environment.
+
+Currently, we support the following properties in a ```devcontainer.json``` file (additional properties are coming soon; for details, go to the [devcontainer metadata reference](https://containers.dev/implementors/json_reference/)):
+
+| **Argument** | **Usage** |
+| -------- | ------- |
+| ```image``` | Image used to create the container |
+| [```forwardPorts```](/docs/open-source/gitspaces/develop-in-gitspaces/forward-ports.md) | Array of ports to be forwarded from the Gitspace to the local machine (including in the browser IDE) |
+| ```postCreateCommand``` | Command to be executed after the Gitspace is created |
+| ```postStartCommand``` | Command to be executed after the Gitspace is started |
+| [```runArgs```](/docs/open-source/gitspaces/develop-in-gitspaces/runArgs.md) | Array of Docker CLI arguments to be used when running the Gitspace |
+| [```containerEnv```](/docs/open-source/gitspaces/develop-in-gitspaces/env-variables.md) | Name-value pairs that set or override environment variables for the container |
+| [```containerUser```](/docs/open-source/gitspaces/develop-in-gitspaces/user-config.md) | Defines the user that all operations run as inside the container |
+| [```remoteUser```](/docs/open-source/gitspaces/develop-in-gitspaces/user-config.md) | Defines the user that lifecycle scripts and any remote editor/IDE server processes run as in the container |
+| [```extensions```](/docs/open-source/gitspaces/develop-in-gitspaces/extensions.md) | Array of extension IDs that specifies which extensions should be installed when the Gitspace is created |
+
+
+The path for this configuration file is ```.devcontainer/devcontainer.json```.
+
+### Example devcontainer.json File
+```
+{
+  "image": "mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye",
+  "forwardPorts": [9000],
+  "postCreateCommand": "yarn install",
+  "postStartCommand": "npm install",
+  "containerEnv": {
+    "MY_CONTAINER_VAR": "abcd",
+    "MY_CONTAINER_VAR2": "efgh"
+  },
+  "customizations": {
+    "vscode": {
+      "extensions": ["streetsidesoftware.code-spell-checker"]
+    }
+  },
+  "runArgs": ["--restart=no", "--security-opt", "seccomp=unconfined"],
+  "containerUser": "root",
+  "remoteUser": "vscode"
+}
+```
+
+### Default Image
+You can specify the image required to create the container in the ```devcontainer.json``` file. Any necessary application dependencies can be pre-installed in this image to save setup time.
+
+If a repository does not have a ```devcontainer.json```, we will spin up the CDE with a default Docker image, ```mcr.microsoft.com/devcontainers/base:dev-ubuntu-24.04```.
+The Dockerfile for this default image is available at [default image](https://github.com/devcontainers/images/tree/main/src/base-ubuntu).
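+
+If a repository only needs the default environment plus a couple of tools, its configuration can stay minimal. The following sketch pins the default image explicitly and installs one extra package; the package choice (```jq```) is purely illustrative, and the ```sudo``` call assumes the image's default non-root user has sudo access (which the devcontainers base images provide):
+
+```
+{
+  "image": "mcr.microsoft.com/devcontainers/base:dev-ubuntu-24.04",
+  "postCreateCommand": "sudo apt-get update && sudo apt-get install -y jq"
+}
+```
+
+Because ```postCreateCommand``` runs once, right after the container is created, this keeps day-to-day Gitspace startup fast while still customizing the environment.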
diff --git a/docs/open-source/gitspaces/get-started/quickstart.md b/docs/open-source/gitspaces/get-started/quickstart.md index fbc341d1934..dc876174ac6 100644 --- a/docs/open-source/gitspaces/get-started/quickstart.md +++ b/docs/open-source/gitspaces/get-started/quickstart.md @@ -1,7 +1,7 @@ --- title: Quickstart Guide description: Get started with Harness Open Source Gitspaces with a few simple steps. -sidebar_position: 3 +sidebar_position: 1 sidebar_label: Quickstart --- diff --git a/docs/open-source/gitspaces/overview.md b/docs/open-source/gitspaces/overview.md index c259ee29db2..aefd38d86e0 100644 --- a/docs/open-source/gitspaces/overview.md +++ b/docs/open-source/gitspaces/overview.md @@ -29,5 +29,4 @@ Each Gitspace is a docker container that is spun up on the same machine where Ha The user can then interact with the container via an IDE. Any changes made to the code are automatically synced to the container. The user can run commands in the container via the IDE, and/or commit changes to the code and push them to the remote repository. -### Multiple Gitspace The user can also create multiple Gitspace for the same repository and branch. Each Gitspace is independent of the other and you will not see changes from one Gitspace in another. This is useful when you want to work on multiple features or bug fixes in parallel. diff --git a/docs/platform/connectors/cloud-providers/ref-cloud-providers/docker-registry-connector-settings-reference.md b/docs/platform/connectors/cloud-providers/ref-cloud-providers/docker-registry-connector-settings-reference.md index 10c0adcb498..89ef1b8b259 100644 --- a/docs/platform/connectors/cloud-providers/ref-cloud-providers/docker-registry-connector-settings-reference.md +++ b/docs/platform/connectors/cloud-providers/ref-cloud-providers/docker-registry-connector-settings-reference.md @@ -78,6 +78,20 @@ The URL of the Docker registry. This is usually the URL used for your [docker lo * For JFrog Artifactory Docker registries, provide your JFrog instance URL, such as `https://mycompany.jfrog.io`. You can get this URL from the `docker-login` command on your repo's **Set Me Up** page. * For Sonatype Nexus Docker registries, provide the Nexus instance URL, such as `:` or `.`. For more information, see the Sonatype Nexus [Docker Authentication](https://help.sonatype.com/en/docker-authentication.html) documentation. +## Harness Artifact Registry (HAR) Configuration +When using the Docker Connector with Harness Artifact Registry (HAR), it's important to configure the registry URL and image names correctly to ensure seamless operation. + +- **Correct URL Format:** Set the registry URL to `https://pkg.harness.io/`. Avoid including the registry name in the URL to prevent validation errors. +- **Fully Qualified Image Name:** Provide the fully qualified image name within the step configuration, such as `pkg.qa.harness.io//harness/`. +- **Deprecated Source Type:** If using a deprecated source type, such as "image" in YAML configurations, ensure the configuration is updated to the current standard to avoid potential issues. For example, if you previously used `sourceType: image`, update it to the current standard like `sourceType: container`. + +:::tip policy enforcement and authentication +**SBOM (Software Bill of Materials) Policy Enforcement:** Ensure the registry URL is correctly configured to avoid hard-coded URL issues. + +**SLSA (Supply-chain Levels for Software Artifacts) Verification Authentication:** Double-check the authentication settings if encountering errors. 
+:::
+---
+
 ## Authentication
 
 You can authenticate anonymously or by username and password.
 
@@ -91,11 +105,9 @@ You can authenticate anonymously or by username and password.
    * For JFrog Docker registries, provide a password.
 
 :::info Docker registry permissions
-
 Make sure the connected user account has *read permission for all repositories* as well as access and permissions to *pull images* and *list images and tags*. For more information, go to the Docker documentation on [Docker Permissions](https://docs.docker.com/datacenter/dtr/2.0/user-management/permission-levels/).
-
 :::
diff --git a/docs/platform/connectors/cloud-providers/ref-cloud-providers/gcs-connector-settings-reference.md b/docs/platform/connectors/cloud-providers/ref-cloud-providers/gcs-connector-settings-reference.md
index ef0f3028093..132622739d6 100644
--- a/docs/platform/connectors/cloud-providers/ref-cloud-providers/gcs-connector-settings-reference.md
+++ b/docs/platform/connectors/cloud-providers/ref-cloud-providers/gcs-connector-settings-reference.md
@@ -118,14 +118,6 @@ Select this option to allow the connector to inherit its authentication credenti
 
 Currently, this feature is behind the feature flag `PL_GCP_OIDC_AUTHENTICATION`. Contact [Harness Support](mailto:support@harness.io) to enable the feature.
 
-Additionally, the OIDC connectivity mode is not compatible with Google Cloud Functions. You can't deploy Google Cloud Functions with OIDC-enabled GCP connectors.
-
-:::
-
-:::warning
-
-If you are using OIDC for your connection, you will not be able to connect to GCP through the Harness Platform. Please connect through a Harness Delegate.
-
 :::
 
 Select the **Connect through Harness Delegate for OIDC** option to allow Harness Delegate to communicate directly with GCP through OIDC. This option uses OIDC authentication to access public cloud resources without secrets or credentials. This option requires Harness Delegate version 24.03.82603 or later.
diff --git a/docs/platform/delegates/manage-delegates/configure-delegate-proxy-settings.md b/docs/platform/delegates/manage-delegates/configure-delegate-proxy-settings.md
index 808f9517145..182a16b690e 100644
--- a/docs/platform/delegates/manage-delegates/configure-delegate-proxy-settings.md
+++ b/docs/platform/delegates/manage-delegates/configure-delegate-proxy-settings.md
@@ -12,6 +12,10 @@ All delegates include proxy settings you can use to change how the delegate conn
 
 By default, the Harness Delegate uses HTTP and HTTPS in its Proxy Scheme settings.
 
+:::warning
+When using HTTP Helm repositories, the [default setting](/docs/platform/settings/default-settings/) `Ignore status code for HTTP connections` must be set to `true`, because the socket connection tests that Harness runs from the delegate do not account for proxy details.
+:::
+
 ### Kubernetes delegate proxy settings
 
 The proxy settings are in the `harness-delegate.yaml` file:
diff --git a/docs/platform/git-experience/oauth-integration.md b/docs/platform/git-experience/oauth-integration.md
index 23fa176a6a0..c1930f522fd 100644
--- a/docs/platform/git-experience/oauth-integration.md
+++ b/docs/platform/git-experience/oauth-integration.md
@@ -87,7 +87,7 @@ Under **Connect to a Provider**, click on **Select a Provider**, CLick on **On-P
 
 ### Configure OAuth for Self-hosted Gitlab provider
 
 :::note
-Currently, support for OAuth configuration with the self-hosted Gitlab provider is behind the feature flag `CDS_PROVIDERS` and `PIPE_ENABLE_GITLAB_ON_PREM_FLOW `.
Please contact [Harness support](mailto:support@harness.io) to enable this feature. +Currently, support for OAuth configuration with the self-hosted and onprem Gitlab provider is behind the feature flag `CDS_PROVIDERS` and `PIPE_ENABLE_GITLAB_ON_PREM_FLOW `. Please contact [Harness support](mailto:support@harness.io) to enable this feature. Harness Delegate version 843xx or later is required for this feature. ::: diff --git a/docs/platform/harness-aida/aida-code-gen.md b/docs/platform/harness-aida/aida-code-gen.md deleted file mode 100644 index d595a8fe66e..00000000000 --- a/docs/platform/harness-aida/aida-code-gen.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Generate code with AIDA -description: Intelligent code generation using Harness AIDA. -sidebar_position: 52 -redirect_from: - - /docs/platform/harness-aida/aida-overview/aida-code-gen ---- - -Code Generation with AIDA turbocharges your coding experience. This IDE extension amplifies developer productivity by leveraging the power of Harness AI Development Assistant (AIDA) to revolutionize the way you write code, tests, and comments. - -Code Generation with AIDA gives you an intelligent assistant at your fingertips, ready to assist you in crafting high-quality code effortlessly. Harness AIDA seamlessly integrates with many popular IDEs, providing intelligent suggestions, generating code snippets, explaining existing code and even writing comprehensive tests to ensure the robustness of your applications. - -With an advanced language model behind it, Harness AIDA goes beyond conventional code completion. It understands context, anticipates your coding needs, and produces not only syntactically correct code but also thoughtful comments and robust tests, making your codebase more readable and maintainable. - -Whether you're a seasoned developer looking to speed up your workflow or a beginner seeking guidance, Harness AIDA IDE extensions are designed to enhance your coding journey. Embrace the future of software development with an IDE extension that empowers you to write better code, faster. - -Code Generation with AIDA is an IDE plugin. It is currently in development. Check back soon for more details. - -## Technical framework of Harness AIDA Code Assistant - -At the core of Harness AIDA Code Assistant's functionality is its sophisticated use of Large Language Models (LLMs), which are trained on vast datasets of code to understand and generate human-like code. These LLMs include but are not limited to OpenAI GPT models like GPT 3.5turbo and GPT 4.0. This allows Harness AIDA Code Assistant to offer contextually relevant code suggestions, explanations, and test generation. - -### Integrated Development Environment (IDE) extension - -Harness AIDA Code Assistant functions as an extension for popular Integrated Development Environments (IDEs). This design choice ensures that developers can use Harness AIDA Code Assistant's features without leaving their familiar coding environment. The extension is built to be lightweight and responsive, providing suggestions in real-time as developers type. It supports a wide range of programming languages, making it versatile across different development contexts. - -## Usage and implementation of Harness AIDA Code Assistant - -Integrating Harness AIDA Code Assistant into the software development workflow is a straightforward process designed to minimize setup time and maximize efficiency. 
This section outlines the steps to implement Harness AIDA Code Assistant and demonstrates its practical use in everyday coding tasks. - -### Setting up the IDE extension - -The extension is currently only available to our alpha partners. To begin using Harness AIDA Code Assistant, developers first install the extension from their IDE's marketplace. This process is similar to adding any other IDE plugin, ensuring ease of adoption. Once installed, developers can configure the extension settings to tailor Harness AIDA Code Assistant's behavior to their preferences, such as specifying which programming languages to activate it for or adjusting the sensitivity of its suggestions. - -### Securing API access - -Harness AIDA Code Assistant requires access to its AI model through Harness Gateway. This token ensures secure communication between the IDE extension and Harness AIDA Code Assistant's servers, safeguarding the code and the developer's intellectual property. The setup guide provides detailed instructions on obtaining and configuring the API token within the IDE. - -### Real-world coding scenarios - -With Harness AIDA Code Assistant activated, developers can start leveraging its features to streamline their coding tasks. For example, when writing new functions, Harness AIDA Code Assistant can suggest complete blocks of code that adhere to best practices, significantly reducing the time required to write boilerplate code. In debugging sessions, Harness AIDA Code Assistant can suggest potential fixes or even generate unit tests to cover edge cases that the developer might not have considered. - -## Features - -### Code autocomplete - -![An example of AIDA code autocompletion](./static/code_gen.gif) - -### Code explanation and comment generation - -![An example of AIDA comment generation](./static/comment_gen.gif) - -### Test generation - -![An example of AIDA test generation](./static/test_gen.gif) - -## Supported IDEs - -Currently we support the following IDEs: - -- VSCode (>= 1.86.0) - -## Ongoing improvements and research directions - -Harness AIDA Code Assistant is in a constant state of evolution, with ongoing efforts to expand its capabilities and improve its performance. Some areas of focus include: - -- Fetching external documentation: Whether it's official language documentation, framework guides, or community-contributed notes, Harness AIDA Code Assistant can present this information directly within the IDE. This saves developers time and ensures that they have access to the information they need when they need it. -- Enriched context through static code analysis: By analyzing the codebase's structure and semantics, Harness AIDA Code Assistant can provide more relevant and accurate suggestions. -- Agent-based context collection: This involves analyzing the current codebase, open files, and cursor position to understand the developer's intent and the task at hand. By gathering this context, Harness AIDA Code Assistant can tailor its suggestions to fit the specific requirements of the code being written. -- Enhancing Contextual Awareness: By refining the models' understanding of code context, Harness AIDA Code Assistant aims to provide even more relevant and accurate suggestions. -- Support for More Languages and Frameworks: Expanding the range of programming languages and development frameworks that Harness AIDA Code Assistant supports is a priority, making it a more versatile tool for a broader audience. 
-- User Interface Improvements: Based on user feedback, the Harness AIDA Code Assistant team continually works on making the tool more intuitive and user-friendly. -- Code refactoring suggestions: Harness AIDA Code Assistant can identify opportunities to improve the code's efficiency or readability and suggest refactored versions. Additionally, for developers learning a new programming language or framework, Harness AIDA Code Assistant can act as an on-demand mentor, providing explanations and documentation for unfamiliar code constructs. -- Model-based solution for optimal prompt finding: This approach maximizes the utility of the LLM, enabling it to generate high-quality code completions and other development aids. diff --git a/docs/platform/harness-aida/aida-overview.md b/docs/platform/harness-aida/aida-overview.md index 74914e0e7fa..2218d394fbe 100644 --- a/docs/platform/harness-aida/aida-overview.md +++ b/docs/platform/harness-aida/aida-overview.md @@ -35,7 +35,7 @@ For more information about navigation 2.0, go to [Harness navigation 2.0](https: | Platform | Harness Support | AIDA can answer questions and suggest relevant documentation to help you browse and discover Harness features and documentation. | Generally available | | Platform | Harness Support | AIDA provides content recommendations when you create a support ticket. | Generally available | | Platform | [Dashboard Intelligence](/docs/platform/dashboards/use-dashboard-intelligence-by-aida) | AIDA empowers you to craft customized dashboards with widget-level control through interactive prompts. | Generally available | -| Platform | [Code Generation](/docs/platform/harness-aida/aida-code-gen) | With the Harness AIDA IDE extension, you can increase productivity by generating multi-line code updates through comments in your IDE, eliminating the need to manually write common functions or look up unknown syntax. | Development | +| Platform | [Code Generation](/docs/platform/harness-aida/code-assistant) | With the Harness AIDA IDE extension, you can increase productivity by generating multi-line code updates through comments in your IDE, eliminating the need to manually write common functions or look up unknown syntax. | Development | | CCM | [Generate governance rules](/docs/category/harness-aida-for-asset-governance) | Generate rules for asset governance accompanied with detailed descriptions to optimize your cloud spend. | Generally available | | CD | Troubleshoot CD deployments | Resolve your deployment failures with AIDA's root cause analysis (RCA). | Generally available | | CD | Policy As Code Assistant | Generate and integrate Open Policy Agent (OPA) Rego policies to meet your compliance standards. | Development | diff --git a/docs/platform/harness-aida/code-assistant.md b/docs/platform/harness-aida/code-assistant.md index c0d3e951c12..345844da900 100644 --- a/docs/platform/harness-aida/code-assistant.md +++ b/docs/platform/harness-aida/code-assistant.md @@ -2,8 +2,6 @@ title: Harness AI Code Assistant description: Harness AI code assistant for enhanced coding productivity. sidebar_position: 52 -redirect_from: - - /docs/platform/harness-aida/aida-overview/aida-code-gen --- Harness AI code assistant consists of a set of tools that enhance coding experience by providing features such as intelligent code generation, real-time code suggestions, automated test generation, contextual code explanations, and a chat assistant for coding queries. @@ -14,59 +12,91 @@ tailored to your specific needs. 
For instructions on how to install and configure the Harness AI Code Assistant extension see the installation section below.
 
 ## Installation and Setup
-Harness AI code assistant is currently available as an extension for Visual Studio Code. It can be installed directly from the Visual Studio Code Marketplace.
-![An example of AIDA code autocompletion](./static/install.png)
+Harness AI code assistant is currently available as an extension for Visual Studio Code and JetBrains IDEs. It can be installed directly from their respective marketplaces.
+
+### VS Code
+![Installing the Harness AI Code Assistant extension in VS Code](./static/install.png)
+
+
 
 ### Authentication
-After installing the extension, you will automatically be prompted to authenticate your account by logging in with your Harness credentials. A pop-up window will appear at the bottom right corner of your IDE, guiding you through the authentication process.
-![An example of AIDA code autocompletion](./static/auth-popup.png)
+After installing the extension, you will need to log in to your Harness account within the IDE to enable the Harness models. You can trigger the authentication flow by clicking on the Harness icon at the bottom right corner of your IDE and selecting 'Harness login' from the dropdown menu.
+![How to log in to Harness models](./static/ca_login.png)
 
 Once you click the login button, you will be redirected to the Harness login page to enter your credentials.
 ![An example of AIDA code autocompletion](./static/login.png)
 
-If the login window does not appear automatically, or if you want to redo
-the authentication process at any time, you can manually trigger the authentication flow by clicking on the Harness icon at the bottom right corner of your IDE and selecting `Harness AI: Authentication' from the dropdown menu.
-![An example of AIDA code autocompletion](./static/auth.png)
-
 ## Using Harness AI Code Assistant: Main Features
 
 After installing the Harness extension and completing the login process, you can immediately begin using the powerful features of the Harness AI Code Assistant within your IDE.
 
-Harness AI Code Assistant offers four key features: Code Generation, Comment Generation, Test Generation, and a Chat Assistant. Below, we provide a detailed explanation of each feature.
+Harness AI Code Assistant offers three key features: Code Generation, Chat Assistant, and Inline Editing. Below, we provide a detailed explanation of each feature.
 
 ### Code Generation
 Code generation works by showing real-time inline code suggestions. The suggestions are generated based on the context of the code being written, and additional information such as relevant files and code snippets in the workspace. Once a suggestion is shown to the user, they can accept it fully or word by word, allowing for customization before finalizing the code.
 ![An example of AIDA code autocompletion](./static/code_gen.gif)
 
-### Comment Geeration
-Harness AI code assistant can be used to generate inline comments that explain the purpose and functionality of the user code, making it more understandable for future reference and for other developers who may work on the code. The user can invoke the comment generation feature by selecting the relevant code block and choosing the generate comments option from the right-click menu.
-![An example of AIDA comment generation](./static/comment_gen.gif)
-
-### Test Generation
-Test generation allows developers to automatically create unit tests for their code.
By analyzing the existing code structure and logic, Harness AI code assistant can suggest comprehensive test cases that cover various scenarios, including edge cases. Test generation can be initiated by selecting the function or code block for which tests are needed and choosing the generate tests option from the right-click menu.
-![An example of AIDA test generation](./static/test_gen.gif)
-
+### Popup Menu
+When you select code in the editor and right-click, a popup menu will appear, providing options to add selected code to a chat session, fix grammar or spelling, fix the code, optimize the code, write a docstring, or add comments to the code.
+![Code assistant popup menu](./static/ca_right_click.png)
 ### Chat Assistant
-Harness AI provides an interactive chat assistant that can answer coding queries, provide explanations, and assist with debugging. You can access the chat assistant by clicking on the Harness icon on the left sidebar of your IDE. This will open a chat window where you can type your questions or requests, and the assistant will respond with relevant information or code suggestions.
-![An example of AIDA code autocompletion](./static/chat1-julia.gif)
-
+Harness AI provides a powerful interactive chat assistant that can answer coding queries, provide explanations, generate new files, write tests, and assist with debugging. You can access the chat assistant by clicking on the Harness icon on the left sidebar of your IDE. This will open a chat window where you can type your questions or requests, and the assistant will respond with relevant information or code suggestions.
+![An example of AIDA code chat](./static/ca_explain_code.png)
 ## Harness AI Chat: Features and Usage
 
 In this section we will explain some of the key features of the Harness AI Chat Assistant and how to effectively utilize them for your coding needs.
 
+### Shortcut Keys
+You can see a list of shortcut keys by clicking on `...` at the top of your chat window.
+![Shortcut Keys](./static/ca_shortcuts.png)
+
+### Reference the entire codebase
+Harness AI Code Assistant indexes all of the code in your current workspace when VS Code launches. This semantic index makes the entire codebase available as context for your chat session. By default, the chat assistant uses the context of the current file to provide relevant suggestions and answers. You can reference the entire codebase by clicking cmd + enter when entering your prompt. Alternatively, you can reference the entire codebase with `@codebase` in your prompt.
+
+You can refresh the codebase index by clicking on `...` at the top of your chat window.
+![Codebase Index](./static/ca_index.png)
+
 ### Adding References
-By defualt, the chat assistant uses the context of the current file to provide relevant suggestions and answers. You can enhance the assistant's capabilities by adding references to other files or libraries that are relevant to your query. In order to add references, simply type `@' in the chat window and a pop-up will appear allowing you to select the files or libraries you want to reference.
-![An example of AIDA code autocompletion](./static/file-tag.png)
+ You can enhance the assistant's capabilities by adding references to specific files or libraries that are relevant to your query to get a more specific response. In order to add references, simply type `@` in the chat window and a pop-up will appear allowing you to select the files or libraries you want to reference.
+![An example of AIDA code autocompletion](./static/ca_file_tag.png) + +## Troubleshooting + +### Networking Issues + +#### Configure Certificates +If your network requires custom certificates, you will need to configure them in config.json. + +To open the config.json file for Harness AI Code Assistant click on the gear icon in the Harness chat window. +![Open the Harness config.json](./static/ca_config.png) + +In each "models" array entry for "Harness" add requestOptions.caBundlePath like this: + +``` +{ + "models": [ + { + "title": "Harness", + ... + "requestOptions": { + "caBundlePath": "/path/to/cert.pem" + } + } + ], + ... +} +``` -### Inserting Code Snippets -The Harness AI chat is an interactive tool designed to help users ask questions and receive responses in the form of code snippets or detailed text explanations. Recognizing that users often want to integrate the suggested code into their own projects, the Harness Chat Assistant simplifies this process. It provides a convenient toolbar above each code snippet, allowing users to effortlessly insert the suggested code at their current cursor position or into a new file, streamlining the workflow. -![An example of AIDA code autocompletion](./static/insert-julia.gif) +You may also set requestOptions.caBundlePath to an array of paths to multiple certificates. +#### VS Code Proxy Settings +If you are using VS Code and require requests to be made through a proxy, you are likely already set up through VS Code's Proxy Server Support. To double-check that this is enabled, use cmd/ctrl + , to open settings and search for "Proxy Support". Unless it is set to "off", then VS Code is responsible for making the request to the proxy. ## Frequently Asked Questions diff --git a/docs/platform/harness-aida/static/ca_config.png b/docs/platform/harness-aida/static/ca_config.png new file mode 100644 index 00000000000..382c839c335 Binary files /dev/null and b/docs/platform/harness-aida/static/ca_config.png differ diff --git a/docs/platform/harness-aida/static/ca_explain_code.png b/docs/platform/harness-aida/static/ca_explain_code.png new file mode 100644 index 00000000000..fa79cf60eba Binary files /dev/null and b/docs/platform/harness-aida/static/ca_explain_code.png differ diff --git a/docs/platform/harness-aida/static/ca_file_tag.png b/docs/platform/harness-aida/static/ca_file_tag.png new file mode 100644 index 00000000000..fb5cf8ddfc0 Binary files /dev/null and b/docs/platform/harness-aida/static/ca_file_tag.png differ diff --git a/docs/platform/harness-aida/static/ca_index.png b/docs/platform/harness-aida/static/ca_index.png new file mode 100644 index 00000000000..20c743e72c5 Binary files /dev/null and b/docs/platform/harness-aida/static/ca_index.png differ diff --git a/docs/platform/harness-aida/static/ca_login.png b/docs/platform/harness-aida/static/ca_login.png new file mode 100644 index 00000000000..398d0877752 Binary files /dev/null and b/docs/platform/harness-aida/static/ca_login.png differ diff --git a/docs/platform/harness-aida/static/ca_right_click.png b/docs/platform/harness-aida/static/ca_right_click.png new file mode 100644 index 00000000000..c1640fab4aa Binary files /dev/null and b/docs/platform/harness-aida/static/ca_right_click.png differ diff --git a/docs/platform/harness-aida/static/ca_shortcuts.png b/docs/platform/harness-aida/static/ca_shortcuts.png new file mode 100644 index 00000000000..0765ffe6dc5 Binary files /dev/null and b/docs/platform/harness-aida/static/ca_shortcuts.png differ diff --git 
a/docs/platform/triggers/static/tag_event_execution.png b/docs/platform/triggers/static/tag_event_execution.png new file mode 100644 index 00000000000..5c9cd78f005 Binary files /dev/null and b/docs/platform/triggers/static/tag_event_execution.png differ diff --git a/docs/platform/triggers/trigger-pipeline-on-tag-event.md b/docs/platform/triggers/trigger-pipeline-on-tag-event.md new file mode 100644 index 00000000000..d7552e8a732 --- /dev/null +++ b/docs/platform/triggers/trigger-pipeline-on-tag-event.md @@ -0,0 +1,70 @@ +--- +title: Trigger pipelines on pushing new tag +description: Trigger pipelines on pushing new tag in your repository. +--- + +You can set up your pipeline to automatically trigger whenever a new tag is pushed to your repository. This guide walks you through the steps to achieve this using a webhook trigger. + +### Pre-Requisite + +1. [Code repo connector](/docs/category/code-repo-connectors) that connects to your Git provider account. +2. Required [Code repo connector permissions for webhook triggers](/docs/platform/triggers/triggers-reference#code-repo-connector-permissions-for-webhook-triggers). +3. Harness CI/CD Pipeline + +### Steps to Trigger a Pipeline on a New Tag + +1. In your Harness pipeline, create a webhook trigger and configure the [Event](/docs/platform/triggers/triggers-reference#event-and-actions) type as **Push**. + +2. When you create and push a new tag (e.g., v1) in your repository, Harness identifies it as a push event and triggers the configured pipeline. + +Example: You push a tag v1 to the repository, which triggers the pipeline execution. + +![](./static/tag_event_execution.png) + +You can verify the payload received by the trigger in the Activity History section of the trigger. An example payload is shown below: + +```json +{ + "ref": "refs/tags/v1", + "before": "0000000000000000000000000000000000000000", + "after": "0b958e797de7e9c55a4c8875b3d3a86c823cfd4b", + "repository": { + "id": 897881853, + "node_id": "R_kgDONYSW_Q", + "name": "azure-function-python", + "full_name": "krishi0408/azure-function-python", + "private": false, + "owner": { + "name": "krishi0408", + "email": "user@harness.io", + "login": "krishi0408", + "id": 109092049, + "node_id": "U_kgDOBoCc0Q", + "avatar_url": "https://avatars.githubusercontent.com/u/109092049?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/krishi0408", + "html_url": "https://github.com/krishi0408", + "followers_url": "https://api.github.com/users/krishi0408/followers", + "following_url": "https://api.github.com/users/krishi0408/following{/other_user}", + "gists_url": "https://api.github.com/users/krishi0408/gists{/gist_id}", + "starred_url": "https://api.github.com/users/krishi0408/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/krishi0408/subscriptions", + "organizations_url": "https://api.github.com/users/krishi0408/orgs", + "repos_url": "https://api.github.com/users/krishi0408/repos", + "events_url": "https://api.github.com/users/krishi0408/events{/privacy}", + "received_events_url": "https://api.github.com/users/krishi0408/received_events", + "type": "User", + "user_view_type": "public", + "site_admin": false + } + } +} +``` +The `ref` field indicates the new tag, in this case, refs/tags/v1. 
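+
+If you manage triggers as YAML, a configuration for this setup looks roughly like the sketch below. This is illustrative only: the name, identifiers, and connector reference are placeholders, and the optional `payloadConditions` entry (a `StartsWith` match on `<+trigger.payload.ref>`) restricts the trigger to tag pushes, since pushes to branches also arrive as Push events.
+
+```yaml
+trigger:
+  name: On Tag Push                        # placeholder name
+  identifier: On_Tag_Push                  # placeholder identifier
+  enabled: true
+  orgIdentifier: default
+  projectIdentifier: my_project            # placeholder project
+  pipelineIdentifier: my_pipeline          # placeholder pipeline
+  source:
+    type: Webhook
+    spec:
+      type: Github
+      spec:
+        type: Push
+        spec:
+          connectorRef: my_github_connector   # placeholder connector
+          payloadConditions:
+            - key: <+trigger.payload.ref>
+              operator: StartsWith
+              value: refs/tags/
+```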
+ + + + + + + diff --git a/docs/platform/triggers/trigger-pipelines-using-generic-events.md b/docs/platform/triggers/trigger-pipelines-using-generic-events.md index 7ab2da00c8e..c411c7a9904 100644 --- a/docs/platform/triggers/trigger-pipelines-using-generic-events.md +++ b/docs/platform/triggers/trigger-pipelines-using-generic-events.md @@ -1,6 +1,6 @@ --- -title: Generic Webhook triggers -description: This topic provides settings information to setup a generic webhook triggers +title: EventRelay Generic Webhook triggers +description: This topic provides information on configuring EventRelay generic webhook triggers --- Trigger Pipelines Using Artifact Changes from Any Third-Party Repository diff --git a/docs/platform/triggers/trigger-pipelines-using-slack-events.md b/docs/platform/triggers/trigger-pipelines-using-slack-events.md index e50b838ec3f..e8e7905f193 100644 --- a/docs/platform/triggers/trigger-pipelines-using-slack-events.md +++ b/docs/platform/triggers/trigger-pipelines-using-slack-events.md @@ -1,6 +1,6 @@ --- -title: Slack Webhook triggers -description: This topic provides settings information to setup a slack webhook trigger +title: EventRelay Slack Webhook triggers +description: This topic provides information on configuring EventRelay slack webhook triggers --- Trigger Pipelines Using Slack Events diff --git a/docs/platform/variables-and-expressions/harness-variables.md b/docs/platform/variables-and-expressions/harness-variables.md index 888eb8c455e..33366965f60 100644 --- a/docs/platform/variables-and-expressions/harness-variables.md +++ b/docs/platform/variables-and-expressions/harness-variables.md @@ -217,6 +217,14 @@ Additionally, variable values (after evaluation) are limited to 256 KB. Expressi ::: +:::note + +In Harness NG pipelines, undefined variables in expressions cause pipeline execution errors. This behavior differs from FirstGen (FG) pipelines, where undefined variables default to null. NG pipelines require explicit handling of undefined variables to avoid failures. + +For example, if the expression `<+pipeline.variables.test>` is used and the pipeline variable `test` is not defined, the expression resolution will fail, causing the step or stage to fail. + +::: + ## Expression manipulation In addition to standard evaluation, expressions can be evaluated and manipulated with Java string methods, JSON parsing, JEXL, interpolation, concatenation, and more. diff --git a/docs/self-managed-enterprise-edition/install/helm-chart-provenance.md b/docs/self-managed-enterprise-edition/install/helm-chart-provenance.md new file mode 100644 index 00000000000..00dcca06b12 --- /dev/null +++ b/docs/self-managed-enterprise-edition/install/helm-chart-provenance.md @@ -0,0 +1,79 @@ +--- +title: Harness Helm Charts Provenance +sidebar_label: Harness Helm Charts Provenance +date: 2025-01-09T23:00 +sidebar_position: 12 +--- + +# Harness Helm Charts Provenance + +Harness Helm charts are now signed to ensure they are secure and trustworthy. + +Starting with version [0.24.0](/release-notes/self-managed-enterprise-edition), you can verify the integrity and origin of the charts using GPG keys with Helm's provenance feature. + +## How to Verify Signed Helm Charts + +### Step 1: Install GnuPG +First, ensure you have GnuPG installed to handle the GPG keys. + +```bash +apk add --no-cache gnupg +``` + +### Step 2: Import the GPG Public Key +Import the Harness public key used to sign the charts. This key will be used to verify the signature. 
+ +```bash +gpg --keyserver hkps://keys.openpgp.org --receive-keys '6117ED4CA5F4605DBF4353F41F6E943934E6D138' +``` + +### Step 3: Convert Keyring to Legacy Format +Convert the GPG keyring to a legacy format to be recognized by Helm provenance verification. + +```bash +gpg --export >~/.gnupg/pubring.gpg +gpg --export-secret-keys >~/.gnupg/secring.gpg +``` + +### Step 4: Verify the Helm Chart +Helm charts can be verified by downloading the chart or pulling it from the Helm repository. + +#### a. Verify Downloaded Chart +Download the Helm chart and its corresponding provenance file from the **[GitHub releases page](https://github.com/harness/helm-charts/releases/tag/harness-0.24.0)** (`*.tgz` and `*.tgz.prov` under Assets). + +```bash +helm verify harness-0.24.0.tgz +``` + +#### b. Verify Using Helm Repository +1. Add the Harness Helm repository: + + ```bash + helm repo add harness https://harness.github.io/helm-charts/ + ``` + +2. Update the Helm repository: + + ```bash + helm repo update + ``` + +3. Pull the chart and verify it with the specified chart version: + + ```bash + helm pull --verify harness/harness --version=0.24.0 + ``` + +### Step 5: Successful Verification +A successful verification will display output similar to the following: + +```plaintext +Signed by: Harness Inc. (Main key for Helm chart signing) +Using Key With Fingerprint: 6117ED4CA5F4605DBF4353F41F6E943934E6D138 +Chart Hash Verified: sha256:a1af3a0b8b54050070e15953c1c964a595720be2640c59fb2df947c259d18247 +``` +*** + +:::info Additional Information +For more details on Helm chart signing and verification, please refer to the [official Helm documentation](https://helm.sh/docs/topics/provenance/). +::: \ No newline at end of file diff --git a/docs/self-managed-enterprise-edition/performance-reports.md b/docs/self-managed-enterprise-edition/performance-reports.md index 4f83d278abb..043ff7b0666 100644 --- a/docs/self-managed-enterprise-edition/performance-reports.md +++ b/docs/self-managed-enterprise-edition/performance-reports.md @@ -30,8 +30,189 @@ Concurrent CD executions involve fetching Docker artifacts from external reposit Harness publishes performance test reports with each release. Select a report below to view report details. +### 2025 reports + +
January 08, 2025
+
+This document details information about the following:
+
+1. Test Environment and resource configuration
+2. Test scenario and results
+
+### [Environment](#)
+- GKE (Kubernetes Server Version) : 1.30.x
+
+### [Database](#)
+- Mongo Atlas M60
+
+### [Redis](#)
+- GCP Memory Store (11 GB)
+
+### [Harness Services](#)
+
+[Helm chart](https://github.com/harness/helm-charts/releases/tag/harness-0.23.0)
+:::info
+- **The below performance test results are obtained using Managed MongoDB and Managed Redis (as mentioned above).**
+- **The default SMP Helm Chart includes in-cluster MongoDB and Redis Sentinel, so performance results may vary accordingly.**
+:::
+
+| Service Name | Replicas | CPU (per replica) | Memory (per replica) | Version |
+|--------------------------|:--------:|:-----------------:|:--------------------:|:--------------:|
+| access-control | 4 | 1 | 5 | harness-0.23.0 |
+| ci-manager | 4 | 3 | 6 | harness-0.23.0 |
+| pipeline-service | 8 | 4 | 10 | harness-0.23.0 |
+| manager | 7 | 3 | 12 | harness-0.23.0 |
+| log-service | 4 | 3 | 12 | harness-0.23.0 |
+| ng-manager | 6 | 2 | 6 | harness-0.23.0 |
+| scm | 2 | 0.5 | 1 | harness-0.23.0 |
+| gateway | 5 | 1 | 4 | harness-0.23.0 |
+| default-backend | 1 | 0.1 | 0.2 | harness-0.23.0 |
+| nginx-ingress-controller | 1 | 5 | 10 | harness-0.23.0 |
+| change-data-capture | 1 | 4 | 6 | harness-0.23.0 |
+| next-gen-ui | 2 | 0.5 | 0.5 | harness-0.23.0 |
+| ng-auth-ui | 2 | 0.1 | 0.1 | harness-0.23.0 |
+| platform-service | 2 | 0.5 | 3 | harness-0.23.0 |
+| template-service | 2 | 1 | 8 | harness-0.23.0 |
+| sto-core | 4 | 0.5 | 1.5 | harness-0.23.0 |
+| sto-manager | 2 | 3 | 6 | harness-0.23.0 |
+| ui | 3 | 0.1 | 0.5 | harness-0.23.0 |
+| policy-mgmt | 3 | 0.3 | 1 | harness-0.23.0 |
+| timescaledb | 2 | 1 | 2 | harness-0.23.0 |
+| ng-dashboard-aggregator | 2 | 0.25 | 2 | harness-0.23.0 |
+
+#### Override file : https://github.com/harness/helm-charts/blob/main/src/harness/override-perf.yaml
+
+### [Test Scenarios](#)
+
+#### [ > 3500 concurrent CI Executions [INLINE]](#)
+Each CI pipeline would
+- initialise a k8s pod and git clone repo
+- run 5 parallel steps (100 sec sleep)
+- run template with 2 parallel steps (140sec sleep)
+
+Projects : 1
+Pipelines : 3500
+Stages per pipeline : 1
+Delegates : 15 (1cpu/4gi)
+Trigger type : webhook
+Test class : `CI_PIPELINE_WEBHOOK_RUN`
+
+> Result : **PASS**
+Avg Execution Time: **5min 50sec**
+
+#### [ > 2700 concurrent CD Executions [INLINE]](#)
+Each CD pipeline would
+- fetch docker artifact from AWS ECR repo
+- run following steps in order:
+  - Canary deploy
+  - Canary delete
+  - Rolling deploy
+  - K8s Delete
+
+Projects : 1
+Pipelines : 2700
+Stages per pipeline : 1
+Delegates : 80 (1cpu/4gi)
+Test class : `CD_PIPELINE_WEBHOOK_RUN`
+
+> Result : **PASS**
+Avg Execution Time: **5min 55sec**
+
+
+ ### 2024 reports +
+
+August 28, 2024
+
+This report details information about the following:
+
+1. Test environment and resource configuration
+2. Test scenario and results
+
+### [Environment](#)
+- GKE (Kubernetes Server Version) : 1.29.x
+
+### [Database](#)
+- Mongo Atlas M60
+
+### [Redis](#)
+- GCP Memory Store (11 GB)
+
+### [Harness Services](#)
+
+[Helm chart](https://github.com/harness/helm-charts/releases/tag/harness-0.19.0)
+
+:::note
+- **The below performance test results are obtained using Managed MongoDB and Managed Redis (as mentioned above).**
+- **The default SMP Helm Chart includes in-cluster MongoDB and Redis Sentinel, so performance results may vary accordingly.**
+:::
+
+| Service Name | Replicas | CPU (per replica) | Memory (per replica) | Version |
+|--------------------------|:--------:|:-----------------:|:--------------------:|:--------------:|
+| access-control | 4 | 1 | 5 | harness-0.19.0 |
+| ci-manager | 4 | 3 | 6 | harness-0.19.0 |
+| pipeline-service | 8 | 4 | 10 | harness-0.19.0 |
+| manager | 7 | 3 | 12 | harness-0.19.0 |
+| log-service | 3 | 3 | 12 | harness-0.19.0 |
+| ng-manager | 6 | 2 | 6 | harness-0.19.0 |
+| scm | 2 | 0.5 | 1 | harness-0.19.0 |
+| gateway | 5 | 1 | 4 | harness-0.19.0 |
+| default-backend | 1 | 0.1 | 0.2 | harness-0.19.0 |
+| nginx-ingress-controller | 1 | 5 | 10 | harness-0.19.0 |
+| change-data-capture | 1 | 4 | 6 | harness-0.19.0 |
+| next-gen-ui | 2 | 0.5 | 0.5 | harness-0.19.0 |
+| ng-auth-ui | 2 | 0.1 | 0.1 | harness-0.19.0 |
+| platform-service | 2 | 0.5 | 3 | harness-0.19.0 |
+| template-service | 2 | 1 | 8 | harness-0.19.0 |
+| sto-core | 4 | 0.5 | 1.5 | harness-0.19.0 |
+| sto-manager | 2 | 3 | 6 | harness-0.19.0 |
+| ui | 3 | 0.1 | 0.5 | harness-0.19.0 |
+| policy-mgmt | 3 | 0.3 | 1 | harness-0.19.0 |
+| timescaledb | 2 | 1 | 2 | harness-0.19.0 |
+| ng-dashboard-aggregator | 2 | 0.25 | 2 | harness-0.19.0 |
+
+#### Override file : https://github.com/harness/helm-charts/blob/main/src/harness/override-perf.yaml
+
+### [Test Scenarios](#)
+
+#### [ > 3300 concurrent CI Executions [INLINE]](#)
+Each CI pipeline would
+- initialise a k8s pod and git clone repo
+- run 5 parallel steps (100 sec sleep)
+- run template with 2 parallel steps (140sec sleep)
+
+Projects : 1
+Pipelines : 3300
+Stages per pipeline : 1
+Delegates : 15 (1cpu/4gi)
+Trigger type : webhook
+Test class : `CI_PIPELINE_WEBHOOK_RUN`
+
+> Result : **PASS**
+Avg Execution Time: **6min 31sec**
+
+#### [ > 2500 concurrent CD Executions [INLINE]](#)
+Each CD pipeline would
+- fetch docker artifact from AWS ECR repo
+- run following steps in order:
+  - Canary deploy
+  - Canary delete
+  - Rolling deploy
+  - K8s Delete
+
+Projects : 1
+Pipelines : 2500
+Stages per pipeline : 1
+Delegates : 70 (1cpu/4gi)
+Test class : `CD_PIPELINE_WEBHOOK_RUN`
+
+> Result : **PASS**
+Avg Execution Time: **5min 52sec**
+
+
+
July 8, 2024
diff --git a/docs/software-engineering-insights/get-started/migrate-propelo-to-harness/migrate-propelo-to-harness-sei.md b/docs/software-engineering-insights/get-started/migrate-propelo-to-harness/migrate-propelo-to-harness-sei.md
index a1297813e67..e8d30e4e9d6 100644
--- a/docs/software-engineering-insights/get-started/migrate-propelo-to-harness/migrate-propelo-to-harness-sei.md
+++ b/docs/software-engineering-insights/get-started/migrate-propelo-to-harness/migrate-propelo-to-harness-sei.md
@@ -238,6 +238,18 @@ If you notice any inconsistencies in the migrated data, please create a support
 Note that all secrets associated with your existing Propelo account will be securely transferred from Propelo's Google KMS infrastructure to Harness's Google KMS infrastructure.
 :::
 
+### Account overview
+
+To get an overview of your **Harness account**, go to **Account Settings**. Under General, select **Account Details** to open the Account Overview page.
+The **Account Details** section enables you to view your account information and set your default experience.
+
+* **Account Name:** Your account name.
+* **Account Id:** Your account ID.
+* **Harness Cluster:** The Harness cluster assigned to your account.
+* **Default Experience:** Lets administrators set the default landing experience for users, either Harness First Generation or Harness Next Generation.
+
+![](../static/account-environment.png)
+
 ## Manual steps to be performed
 
 ### SSO Configuration
@@ -267,7 +279,7 @@ If you were using an Ingestion Satellite for on-premise integrations, follow the
 
 ![](../static/MIGRATION-SATELLITE.png)
 
-If you’re unsure which environment to use, contact the Support Executive assigned to your Harness account.
+To determine which Harness cluster is assigned to your account, go to your [account details](#account-overview) page.
 
 After updating the `satellite.yml` file, run the container to schedule the ingestion process for your integrations.
 
@@ -310,7 +322,7 @@ To generate the API token:
 
 ![](../static/jenkins-key-harness.png)
 
-* Set the **Application Type** to the environment where you are configuring the **Plugin** i.e. the environment where your Harness Account is hosted on.
+* Set the **Application Type** to the environment where you are configuring the **Plugin**, i.e., the cluster where your Harness account is hosted.
 
 * Add the [Jenkins Username](#step-1-generate-jenkins-user-credentials) and [Jenkins User Token (Jenkins API Token)](#step-1-generate-jenkins-user-credentials).
 * Save the plugin settings.
 
@@ -321,7 +333,7 @@
 If your Jenkins plugin version is newer than 1.0.30, follow these steps:
 
 * Go to **Jenkins plugin** settings.
-* Set the **Application Type** to the **Harness SEI environment** where your account is hosted. Contact [Harness Support](mailto:support@harness.io) to confirm where your account is hosted.
+* Set the **Application Type** to the **Harness SEI environment** where your account is hosted. View the [Account details](#account-overview) page to confirm where your account is hosted.
#### Verification diff --git a/docs/software-engineering-insights/get-started/sei-onboarding-guide.md b/docs/software-engineering-insights/get-started/sei-onboarding-guide.md index cb9055d5815..ba21647003b 100644 --- a/docs/software-engineering-insights/get-started/sei-onboarding-guide.md +++ b/docs/software-engineering-insights/get-started/sei-onboarding-guide.md @@ -81,10 +81,10 @@ If you're migrating to Harness SEI from Propelo standalone application, review t | Manage Insight association | Associate or Disassociate Insights with collections | [Click here](/docs/software-engineering-insights/insights/sei-insights#manage-insights-associations) | | View Insight | View existing Insights | [Click here](/docs/software-engineering-insights/insights/sei-insights#view-insights) | | Explore Insight data | Derive value out of the widget data on the Insights | [Click here](/docs/software-engineering-insights/insights/sei-insights#explore-data) | -| Create DORA Insight | Learn how to create a DORA Insight using various DORA widgets | [Click here](/docs/software-engineering-insights/insights/dora-insight) | -| Create Trellis Insight | Learn how to create a Trellis Insight using various Trellis widgets | [Click here](/docs/software-engineering-insights/insights/trellis-insight) | -| Create Sprints Insight | Learn how to create a Sprints Insight using various sprint metric widgets | [Click here](/docs/software-engineering-insights/insights/sprint-metrics-insight) | -| Create Developer Insight | Learn how to create a Developer Insight using SCM metric widgets | [Click here](/docs/software-engineering-insights/insights/developer-insight) | +| Create DORA Insight | Learn how to create a DORA Insight using various DORA widgets | [Click here](/docs/software-engineering-insights/insights/insight-tutorials/dora-insight) | +| Create Trellis Insight | Learn how to create a Trellis Insight using various Trellis widgets | [Click here](/docs/software-engineering-insights/insights/insight-tutorials/trellis-insight) | +| Create Sprints Insight | Learn how to create a Sprints Insight using various sprint metric widgets | [Click here](/docs/software-engineering-insights/insights/insight-tutorials/agile-insights) | +| Create Developer Insight | Learn how to create a Developer Insight using SCM metric widgets | [Click here](/docs/software-engineering-insights/insights/insight-tutorials/developer-insight) | ### Phase 7: Role-Based Access Control @@ -269,10 +269,10 @@ Review the following resources to learn more about creating and managing Insight ### Tutorials -* [DORA Insight](/docs/software-engineering-insights/insights/dora-insight) -* [Trellis Scores Insight](/docs/software-engineering-insights/insights/trellis-insight) -* [Developer Insight](/docs/software-engineering-insights/insights/developer-insight) -* [Sprints Insight](/docs/software-engineering-insights/insights/sprint-metrics-insight) +* [DORA Insight](/docs/software-engineering-insights/insights/insight-tutorials/dora-insight) +* [Trellis Scores Insight](/docs/software-engineering-insights/insights/insight-tutorials/trellis-insight) +* [Developer Insight](/docs/software-engineering-insights/insights/insight-tutorials/developer-insight) +* [Sprints Insight](/docs/software-engineering-insights/insights/insight-tutorials/agile-insights) ## Phase 7: Role-Based Access Control @@ -313,7 +313,7 @@ Find the list of demos for the latest Harness SEI features. 
| Project Level Settings on Harness SEI |Watch on YouTube | [Click here](/docs/software-engineering-insights/) | | Business Alignment |Watch on YouTube | [Click here](/docs/software-engineering-insights/sei-metrics-and-reports/planning/sei-business-alignment-reports) | | Trellis Scores - Reimagined - BETA |Watch on YouTube | [Click here](/docs/software-engineering-insights/early-access/profiles/sei-trellis-factors) | -| DORA Metrics Explained |Watch on YouTube | [Click here](/docs/software-engineering-insights/insights/dora-insight) | -| Dev Insight Explained |Watch on YouTube | [Click here](/docs/software-engineering-insights/insights/developer-insight) | -| Sprints Insight Explained |Watch on YouTube | [Click here](/docs/software-engineering-insights/insights/sprint-metrics-insight) | +| DORA Metrics Explained |Watch on YouTube | [Click here](/docs/software-engineering-insights/insights/insight-tutorials/dora-insight) | +| Dev Insight Explained |Watch on YouTube | [Click here](/docs/software-engineering-insights/insights/insight-tutorials/developer-insight) | +| Sprints Insight Explained |Watch on YouTube | [Click here](/docs/software-engineering-insights/insights/insight-tutorials/agile-insights) | | Collections Walkthrough |Watch on YouTube | [Click here](/docs/software-engineering-insights/sei-projects-and-collections/manage-collections) | \ No newline at end of file diff --git a/docs/software-engineering-insights/get-started/static/account-environment.png b/docs/software-engineering-insights/get-started/static/account-environment.png new file mode 100644 index 00000000000..647d86aaedd Binary files /dev/null and b/docs/software-engineering-insights/get-started/static/account-environment.png differ diff --git a/docs/software-engineering-insights/insights/insight-tutorials/_category_.json b/docs/software-engineering-insights/insights/insight-tutorials/_category_.json new file mode 100644 index 00000000000..fb4eb0b2817 --- /dev/null +++ b/docs/software-engineering-insights/insights/insight-tutorials/_category_.json @@ -0,0 +1,11 @@ +{ + "label":"Tutorials", + "position": 30, + "collapsible":"true", + "collapsed":"true", + "className":"default", + "link":{ + "type":"generated-index", + "title":"Tutorials" + } + } \ No newline at end of file diff --git a/docs/software-engineering-insights/insights/sprint-metrics-insight.md b/docs/software-engineering-insights/insights/insight-tutorials/agile-insights.md similarity index 98% rename from docs/software-engineering-insights/insights/sprint-metrics-insight.md rename to docs/software-engineering-insights/insights/insight-tutorials/agile-insights.md index 09292d51775..7520401c9f1 100644 --- a/docs/software-engineering-insights/insights/sprint-metrics-insight.md +++ b/docs/software-engineering-insights/insights/insight-tutorials/agile-insights.md @@ -59,7 +59,7 @@ You can also create new integrations and associate the integration with the curr 2. Go the **Project** scope and select your **Project**. 3. If you don't have any existing Insight, then click on the **Create Insight** button on the landing page to create a new Insight. -![](./static/create-insight.png) +![](../static/create-insight.png) 4. If you already have existing Insights in your selected Project, then go to any Insight. For instructions, go to [View Insights](#view-insights). 5. In the header, select **All Insights**, and then select **Manage Insights**. 
@@ -85,7 +85,7 @@ Engineering managers usually use these three key sprint metrics to get a holisti The **Sprint Metrics Trend Report** is recommended for visualizing a time series trend of these metrics to help engineering managers understand how the respective sprint metrics has evolved throughout various sprint cycles. -![](./static/sprint-trend.png) +![](../static/sprint-trend.png) To add the **Sprint Metrics Trend Report** widget to Insights: @@ -112,7 +112,7 @@ But what happens when there are issues with the issues? That's where the hygiene You can use this report to measure and track the hygiene score for the previous sprint or the active sprints. -![](./static/active-sprint-hygiene.png) +![](../static/active-sprint-hygiene.png) To add the **Issue Hygiene Report** widget to Insights: @@ -130,7 +130,7 @@ To add the **Issue Hygiene Report** widget to Insights: The Issues Report is used to examine metrics related to issues in your issue management system. The report aggregates data based on selected attributes, such as priority, status, labels, components, or any other field. This report helps you create comparisons based on various fields and draw conclusions to make decisions. -![](./static/issues-report.png) +![](../static/issues-report.png) To add the **Issues Report** widget to Insights: @@ -159,7 +159,7 @@ To configure the widget for various other use cases, go to [Issues Report Use Ca The Issue Resolution Time Report is a customizable bar graph that displays the number of tickets that were closed and the average time it took to close them based on their creation time. -![](./static/issue-resolution-time-report.png) +![](../static/issue-resolution-time-report.png) To add the **Issue Resolution Time Report** widget to Insights: @@ -178,7 +178,7 @@ The **Sprint Metrics Single Stat** widget presents a single sprint metric averag For example, the Sprint Metrics Single Stat widget can help you use historical metrics for sprint prediction and performance assessment. -![](./static/sprint-single-stat.png) +![](../static/sprint-single-stat.png) To add the **Sprint Metrics Single Stat Report** widget to Insights: diff --git a/docs/software-engineering-insights/insights/developer-insight.md b/docs/software-engineering-insights/insights/insight-tutorials/developer-insight.md similarity index 98% rename from docs/software-engineering-insights/insights/developer-insight.md rename to docs/software-engineering-insights/insights/insight-tutorials/developer-insight.md index 574bba0d6da..fee0e0f0a53 100644 --- a/docs/software-engineering-insights/insights/developer-insight.md +++ b/docs/software-engineering-insights/insights/insight-tutorials/developer-insight.md @@ -54,7 +54,7 @@ You can also create new integrations and associate the integration with the curr 2. Go the **Project** scope and select your **Project**. 3. If you don't have any existing Insight, then click on the **Create Insight** button on the landing page to create a new Insight. -![](./static/create-insight.png) +![](../static/create-insight.png) 4. If you already have existing Insights in your selected Project, then go to any Insight. For instructions, go to [View Insights](#view-insights). 5. In the header, select **All Insights**, and then select **Manage Insights**. @@ -86,9 +86,9 @@ The SCM PRs Report shows a high-level view of PRs moving through your SCM tool. * Analyze PR comments and categorize them based on a threshold. * Better understanding of the overall contribution of the team. 
-![](./static/scm-prs-report-one.png) +![](../static/scm-prs-report-one.png) -![](./static/scm-prs-report-two.png) +![](../static/scm-prs-report-two.png) To add the **SCM PRs Report** widget to Insights: @@ -109,7 +109,7 @@ For information, go to [SCM PRs Report](/docs/software-engineering-insights/sei- The SCM Coding Days Report displays the number of days in a given time period that a user or team has committed code to their SCM repository. This report can be used to track how active a user or team is in terms of code development and to identify trends in coding activity over time. -![](./static/coding-days.png) +![](../static/coding-days.png) To add the **SCM Coding Days Report** widget to Insights: @@ -128,7 +128,7 @@ For information, go to SCM [Coding Days Report](/docs/software-engineering-insig Use the SCM PR Lead Time by Stage Report to examine PR velocity based on time spent in various PR lifecycle stages. By default, this report shows the average time for all PRs. You can drill down to explore data for individual PRs. You can also configure this report to show the median, 90th percentile, or 95th percentile, instead of the average time. -![](./static/pr-leadtime-by-stage.png) +![](../static/pr-leadtime-by-stage.png) To add the **SCM PR Lead Time by Stage Report** widget to Insights: @@ -147,7 +147,7 @@ You can use the Single Stats reports to calculate various SCM related metrics su * SCM PRs Merge Single Stat * SCM PRs Single Stat -![](./static/scm-single-stat.png) +![](../static/scm-single-stat.png) To add the **SCM Coding Days Single Stat Report** widget to Insights: diff --git a/docs/software-engineering-insights/insights/dora-insight.md b/docs/software-engineering-insights/insights/insight-tutorials/dora-insight.md similarity index 98% rename from docs/software-engineering-insights/insights/dora-insight.md rename to docs/software-engineering-insights/insights/insight-tutorials/dora-insight.md index d0c5beee506..46d9735429f 100644 --- a/docs/software-engineering-insights/insights/dora-insight.md +++ b/docs/software-engineering-insights/insights/insight-tutorials/dora-insight.md @@ -82,7 +82,7 @@ For more information on configuring stages for the lead time workflow, refer to 2. Go the **Project** scope and select your **Project**. 3. If you don't have any existing Insight, then click on the **Create Insight** button on the landing page to create a new Insight. -![](./static/create-insight.png) +![](../static/create-insight.png) 4. If you already have existing Insights in your selected Project, then go to any Insight. For instructions, go to [View Insights](#view-insights). 5. In the header, select **All Insights**, and then select **Manage Insights**. @@ -104,7 +104,7 @@ Include **DORA metrics reports** to understand how well your team is doing. The To keep track of Deployment Frequency, set up a [Workflow profile](/docs/software-engineering-insights/sei-profiles/workflow-profiles/workflow-profile-overview). This helps you pick what to monitor like merged pull requests or CI/CD jobs for the associated Collections. You can adjust Workflow profiles to fit your team's way of working, focusing on specific steps like Source Code Management (SCM) or a combination of issue management, SCM, and CI/CD. For more information, go to Workflow profile. 
-![](./static/deployment-frequency.png) +![](../static/deployment-frequency.png) To add the **Deployment Frequency** widget to Insights: @@ -126,7 +126,7 @@ DORA calculation for Lead Time is similar to how lead time, in general, is calcu This report represents the amount of time it takes for a commit to get into production. -![](./static/leadtime-for-changes.png) +![](../static/leadtime-for-changes.png) To add the **Lead Time for Changes** widget to Insights: @@ -144,7 +144,7 @@ For information about other Lead Time reports, go to [Lead time reports](/docs/s Change Failure Rate represents the percentage of deployments that cause a failure in production. To monitor Change Failure Rate in SEI, you will again need to associate the widget with an existing workflow profile. -![](./static/change-failure-rate.png) +![](../static/change-failure-rate.png) To add the **Change Failure Rate** Report widget to Insights: @@ -161,7 +161,7 @@ The Mean Time To Restore (MTTR), also known as Time to Recover, represents the d The overall time can be analyzed stage by stage over the organization's failure recovery workflow. -![](./static/mttr.png) +![](../static/mttr.png) To add the **DORA Mean Time To Restore** report to the Insight: diff --git a/docs/software-engineering-insights/insights/trellis-insight.md b/docs/software-engineering-insights/insights/insight-tutorials/trellis-insight.md similarity index 98% rename from docs/software-engineering-insights/insights/trellis-insight.md rename to docs/software-engineering-insights/insights/insight-tutorials/trellis-insight.md index aa84462ae22..7332f211d5e 100644 --- a/docs/software-engineering-insights/insights/trellis-insight.md +++ b/docs/software-engineering-insights/insights/insight-tutorials/trellis-insight.md @@ -72,7 +72,7 @@ You can also create new integrations and associate the integration with the curr 2. Go the **Project** scope and select your **Project**. 3. If you don't have any existing Insight, then click on the **Create Insight** button on the landing page to create a new Insight. -![](./static/create-insight.png) +![](../static/create-insight.png) 4. If you already have existing Insights in your selected Project, then go to any Insight. For instructions, go to [View Insights](#view-insights). 5. In the header, select **All Insights**, and then select **Manage Insights**. @@ -101,7 +101,7 @@ The Trellis Score Report calculates and displays the Trellis Scores by individua The factors and weight associated with the Trellis Score are essentially defined at the profile level. To learn more, go to [Trellis Profile](/docs/software-engineering-insights/sei-profiles/trellis-profile). -![](./static/trellis-score-report.png) +![](../static/trellis-score-report.png) To add the **Trellis Score Report** widget to Insights: @@ -116,7 +116,7 @@ This report calculates and displays the Trellis Scores organized by Collection. The factors and associated metrics used while calculating the values are identical to how the Trellis Score is calculated in general with the differentiation on calculating the score for each collection i.e. average of the Trellis score calculated for all users that are part of the collection. -![](./static/trellis-score-by-collection.png) +![](../static/trellis-score-by-collection.png) To add the **Trellis Scores by Collection Report** widget to Insights: @@ -134,7 +134,7 @@ You can choose to display the scores of the immediate child Collections only. 
This report displays a table of base values that contribute to Trellis Scores at the contributor level i.e. it calculates and displays the breakdown of the Trellis Scores by individual developers. You can edit the widget to show different values (add/remove columns) or apply filtering. -![](./static/individual-raw-stats.png) +![](../static/individual-raw-stats.png) To add the **Individual Raw Stats Report** widget to Insights: @@ -148,7 +148,7 @@ To add the **Individual Raw Stats Report** widget to Insights: This report displays a table of base values that contribute to Trellis Scores at the collection level i.e. it calculates and displays the breakdown of the Trellis Scores (Base values) organized by Collection. You can edit the widget to show different values (add/remove columns) or apply filtering. -![](./static/raw-stats-by-collection.png) +![](../static/raw-stats-by-collection.png) To add the **Raw Stats by Collection Report** widget to Insights: diff --git a/docs/software-engineering-insights/insights/sei-insights.md b/docs/software-engineering-insights/insights/sei-insights.md index 993739bcb64..63912e06bcc 100644 --- a/docs/software-engineering-insights/insights/sei-insights.md +++ b/docs/software-engineering-insights/insights/sei-insights.md @@ -9,23 +9,38 @@ redirect_from: Insights are dashboards that make it easy to visualize and interpret metrics that are captured by SEI. With a variety of out-of-the-box widgets, you can create Insights that show the data you care about most and help you understand your engineering team's effectiveness and efficiency. -This topic explains how to create and view Insights. For information about configuring specific reports and the metrics that are presented on widgets, go to [Metrics and reports](/docs/category/metrics-and-reports). +This topic explains how to create and view Insights. For information about configuring specific widgets and the metrics that are presented on widgets, go to [Metrics and widgets](/docs/category/metrics-and-reports). + +![](./static/sei-insights.png) ## View Insights -The primary way to access Insights is through the **Insights** tab. +The primary way to access Insights is through the **Insights** page. 1. Log in to the Harness Platform and go to the SEI module. 2. Go the **Project** scope and select your project. -3. Select the **Insights** tab on the navigation menu.

You will be automatically directed to the first Collection and the associated Insight. Note that for the existing logged-in users, SEI maintains the latest state and automatically redirects you to the most recently viewed Collection and Insight.
+3. Select the **Insights** page in the navigation menu.
+
+You will be directed to the first Collection and its associated Insight. For logged-in users, SEI maintains your latest state and automatically redirects you to the most recently viewed Collection and Insight.
![](./static/manage-insight.png)
-Select **Manage Insights** to view all the Insights. The Insights available for each Collection depend on the [Insight associations](#manage-insights-associations). The **Default** label indicates the Collection's default Insight.
+* The Insights available for each Collection are determined by the [Insight associations](#manage-insights-associations).
+* A **Default** label indicates the default Insight for a Collection.
+
+### Navigation options
+
+* To switch projects or Collections, use the **Project** and **Collection** dropdown menus in the navigation menu.
-To switch projects or Collections, use the **Project** and **Collection** options on the navigation menu. You can also use the breadcrumbs in the Insights header to navigate up through the [Collection hierarchy](/docs/software-engineering-insights/sei-projects-and-collections/manage-collections).
+![](./static/switch-projects.png)
-If integration monitoring is enabled, you can view the integration status in the Insights header.
+* Use the Collection navigation in the Insights header to navigate through the [Collection hierarchy](/docs/software-engineering-insights/sei-projects-and-collections/manage-collections).
+
+![](./static/collection-navigation.png)
+
+* If [Integration Monitoring](/docs/software-engineering-insights/sei-integrations/sei-integrations-overview#integration-monitoring) is enabled, the integration status will be displayed in the Insights header.
+
+![](./static/integration-monitoring.png)
### Explore data
@@ -35,13 +50,13 @@ Once you're viewing an Insight, you can explore the widgets and interact with th
Widgets get data from [integrations](/docs/category/sei-integrations), which are inherited from the [Collections associated with Insights](#manage-insights-associations). In turn, Collections inherit integrations from their associated Harness project, because integrations are configured at the project level. For more information about this hierarchy and configuring Collections, go to [Collections](/docs/category/projects-and-collections).
-You can also configure individual metrics and widgets to specify what data is used in calculations, how data is presented, and other criteria. For more information, go to [Metrics and reports](/docs/category/metrics-and-reports).
+You can also configure individual metrics and widgets to specify what data is used in calculations, how data is presented, and other criteria. For more information, go to [Metrics and widgets](/docs/category/metrics-and-reports).
:::
#### Set the Insight time
-_Insight time_ is the time range selected by the user viewing an Insight. Widgets and reports must be configured to **Use Insight Time** in order for their data to update when you change the Insight time range.
+_Insight time_ is the time range selected by the user viewing an Insight. Widgets must be configured to **Use Insight Time** in order for their data to update when you change the Insight time range.
If a widget or report uses a specific time range, changing the Insight time has no impact on the data shown by that widget or report. @@ -51,61 +66,67 @@ Some widgets allow you to drill down into data. For example, you can select a se ![](./static/drilldown.png) -### Access Insights from the Insights list +### Insight management -1. While viewing an Insight, select **All Insights** in the header, and then select **Manage Insights** to go to the **Insights** list. -2. Select the Insight you want to view from the **Insights** list. +The Insight Management tab provides an overview of all available Insights, including details such as who created them. From here, you can edit Insight settings, delete Insights, or clone them for reuse. -![](./static/manage-insights.png) +#### Steps to manage insights -3. You can change the Collection using the Collection navigation header at the top of the Insight. -4. Select a **Collection**. Upon selecting a Collection, the current Insight opens up under the selected Collection heirarchy. +1. Log in to the **Harness platform** and select the SEI module. +2. Choose your project from the available list. +3. Select the **Insight Management** tab on the navigation menu. +4. View, edit, delete, or clone existing Insights directly from this tab. + +![](./static/insight-management.png) ## Create Insights -1. Log in to the **Harness Platform** and go to the SEI module. -2. Go the **Project** scope and select your **Project**. -3. If you don't have any existing Insight, then click on the **Create Insight** button on the landing page to create a new Insight. +Follow these steps to create a new Insight. + +* Log in to the **Harness platform** and select the SEI module. +* Choose your project from the available list. +* If no Insights exist, click Create Insight on the landing page. ![](./static/create-insight.png) -4. If you already have existing Insights in your selected Project, then go to any Insight. For instructions, go to [View Insights](#view-insights). -5. In the header, select **All Insights**, and then select **Manage Insights**. -6. Select **Create Insight**. +* If Insights already exist, navigate to Insight Management in the left navigation menu and click Create Insight at the top-right corner. -![](./static/insight-settings.png) +![](./static/create-new-insight.png) -7. Enter a **Name** for the Insight. -8. Under **Parameters**, you can modify the following settings: +* Enter a **Name** for the Insight. +* Modify the following parameters under the **Parameters** section: - * **Insight Time Range:** You can enable Insight time, which is the time range selected by the user when viewing an Insight. - * **Effort Investment Profile** and **Effort Investment Units** are used for [Business Alignment Insights](#business-alignment) and [DORA Metrics Insights](#dora-metrics). + * **Insight Time Range:** Allows users to select a custom date range when viewing the Insight. + * **Integration Monitoring:** Enable to display integration health status on the dashboard. -9. Select at least one Collection category to associate with this Insight.

All [Collections](/docs/software-engineering-insights/sei-projects-and-collections/manage-collections) and child Collections under the selected category are automatically associated with the Insight. You can refine the [Insight associations](#manage-insights-associations) after initial Insight creation.

If you want to examine Collections under a certain Collection category, select **View Collections** on the **Collections Categories** dropdown menu.
+![](./static/insight-settings.png)
-10. Select **Create** to save the Insight metadata. From here, you can [add reports](#add-reports) or modify the Insight settings, such as [Insights associations](#manage-insights-associations).
+* Select at least one Collection category to associate with the Insight.
+  * All [Collections](/docs/software-engineering-insights/sei-projects-and-collections/manage-collections) and child Collections within the selected category will be automatically linked.
+  * To review Collections within a category, click **View Collections** in the Collections categories dropdown menu.
+* Click **Create** to save the Insight.
+* After saving, add widgets or modify settings such as [Insight associations](#manage-insights-associations).
-### Add reports
+### Add widgets
-Reports (also known as *widgets*) are the parts of your Insights that contain data. Reports can present a single statistic or they can present charts, graphs, and tables that compare or combine multiple statistics.
+Widgets (also known as *reports*) are the parts of your Insights that contain data. Widgets can present a single statistic or they can present charts, graphs, and tables that compare or combine multiple statistics.
-To add reports to Insights:
+To add widgets to Insights:
-1. Go to the Insight where you want to add the report.
+1. Go to the Insight where you want to add the widget.
2. Select **Settings**, and then select **Add Widget**.
-3. Select the report that you want to add.
-4. Configure the report settings. For information about configuring specific reports go to [Metrics and reports](/docs/category/metrics-and-reports).
+3. Select the widget that you want to add.
+4. Configure the widget settings. For information about configuring specific widgets, go to [Metrics and widgets](/docs/category/metrics-and-reports).
5. Select **Next: Place Widget** and arrange the widget on the Insight.
6. Select **Save Layout**.
![](./static/sample-report.png)
+:::info Where does the data in widgets come from?
-:::info Where does the data in reports come from?
Widgets get data from [integrations](/docs/category/sei-integrations), which are inherited from the [Collections associated with Insights](#manage-insights-associations). In turn, Collections inherit integrations from their associated Harness project, because integrations are mapped at the project level. For more information about this hierarchy and configuring Collections, go to [Collections](/docs/category/projects-and-collections).
-Reports get data from [integrations](/docs/category/sei-integrations), which are inherited from the [Collections associated with Insights](#manage-insights-associations). In turn, Collections inherit integrations from their associated Harness project, because integrations are mapped at the project level. For more information about this hierarchy and configuring Collections, go to [Collections](/docs/category/projects-and-collections).
-You can also configure individual metrics and reports to specify what data is used in calculations, how data is presented, and other criteria.
+You can also configure individual metrics and widgets to specify what data is used in calculations, how data is presented, and other criteria.
For more information, go to [Metrics and reports](/docs/category/metrics-and-reports).
:::
@@ -146,21 +167,23 @@ Use the DORA Metrics Insight to examine your organization's [DORA (DevOps Resear
If you want to create a DORA Metrics Insight, refer to the resources below.
-* [Tutorial: Create a DORA Metrics Insight](/docs/software-engineering-insights/insights/dora-insight)
-* [DORA Metrics Reports](/docs/software-engineering-insights/sei-metrics-and-reports/dora-metrics)
+* [Tutorial: Create a DORA Metrics Insight](/docs/software-engineering-insights/insights/insight-tutorials/dora-insight)
+* [DORA widgets](/docs/software-engineering-insights/sei-metrics-and-reports/dora-metrics)
+
+![](./static/dora-insight.png)
### Dev Insights
Dev Insights examines development efforts, particularly in relation to SCM metrics, such as PR creation, merging, and review collaboration.
-For more information about SCM metrics and reports, go to [velocity metrics](/docs/software-engineering-insights/sei-metrics-and-reports/velocity-metrics-reports/velocity-metrics-overview) and [SCM reports](/docs/software-engineering-insights/sei-metrics-and-reports/velocity-metrics-reports/scm-reports).
+For more information about SCM metrics and widgets, go to [velocity metrics](/docs/software-engineering-insights/sei-metrics-and-reports/velocity-metrics-reports/velocity-metrics-overview) and [SCM reports](/docs/software-engineering-insights/sei-metrics-and-reports/velocity-metrics-reports/scm-reports).
If you want to create a Developer (i.e., SCM Metrics) Insight, refer to the resources below.
-* [Tutorial: Create a Developer / SCM Metrics Insight](/docs/software-engineering-insights/insights/developer-insight)
-* [SCM Reports](/docs/software-engineering-insights/sei-metrics-and-reports/velocity-metrics-reports/scm-reports)
+* [Tutorial: Create a Developer / SCM Metrics Insight](/docs/software-engineering-insights/insights/insight-tutorials/developer-insight)
+* [SCM widgets](/docs/software-engineering-insights/sei-metrics-and-reports/velocity-metrics-reports/scm-reports)
-![](./static/dev-insights.png)
+![](./static/dev-insight.png)
### Business Alignment
The Business Alignment Insight can help visualize where your teams expend the mo
For more information about these reports, go to [Business Alignment](/docs/software-engineering-insights/sei-metrics-and-reports/planning/sei-business-alignment-reports).
+![](./static/business-alignment-insight.png)
+
### Trellis
Use the Trellis Insight to examine [Trellis Scores](/docs/category/trellis-score
If you want to create a Trellis Insight, refer to the resources below.
-* [Tutorial: Create a Trellis Metrics Insight](/docs/software-engineering-insights/insights/trellis-insight)
-* [Trellis Reports](/docs/category/trellis-scores)
+* [Tutorial: Create a Trellis Metrics Insight](/docs/software-engineering-insights/insights/insight-tutorials/trellis-insight)
+* [Trellis widgets](/docs/category/trellis-scores)
### Planning Insights
Use the Planning Insight to examine [sprint metrics](/docs/category/sprint-metri
If you want to create a Sprints Metrics Insight, refer to the resources below.
-* [Tutorial: Create a Sprint Metrics Insight](/docs/software-engineering-insights/insights/sprint-metrics-insight) -* [Sprint Metric Reports](/docs/category/sprint-metrics) \ No newline at end of file +* [Tutorial: Create a Sprint Metrics Insight](/docs/software-engineering-insights/insights/insight-tutorials/agile-insights) +* [Sprint metric widgets](/docs/category/sprint-metrics) \ No newline at end of file diff --git a/docs/software-engineering-insights/insights/static/business-alignment-insight.png b/docs/software-engineering-insights/insights/static/business-alignment-insight.png new file mode 100644 index 00000000000..cc879a293cf Binary files /dev/null and b/docs/software-engineering-insights/insights/static/business-alignment-insight.png differ diff --git a/docs/software-engineering-insights/insights/static/collection-navigation.png b/docs/software-engineering-insights/insights/static/collection-navigation.png new file mode 100644 index 00000000000..3f1dddeed53 Binary files /dev/null and b/docs/software-engineering-insights/insights/static/collection-navigation.png differ diff --git a/docs/software-engineering-insights/insights/static/create-new-insight.png b/docs/software-engineering-insights/insights/static/create-new-insight.png new file mode 100644 index 00000000000..61ac9b0cc33 Binary files /dev/null and b/docs/software-engineering-insights/insights/static/create-new-insight.png differ diff --git a/docs/software-engineering-insights/insights/static/dev-insight.png b/docs/software-engineering-insights/insights/static/dev-insight.png new file mode 100644 index 00000000000..070dd373b18 Binary files /dev/null and b/docs/software-engineering-insights/insights/static/dev-insight.png differ diff --git a/docs/software-engineering-insights/insights/static/dora-insight.png b/docs/software-engineering-insights/insights/static/dora-insight.png new file mode 100644 index 00000000000..f65c04c69a8 Binary files /dev/null and b/docs/software-engineering-insights/insights/static/dora-insight.png differ diff --git a/docs/software-engineering-insights/insights/static/insight-management.png b/docs/software-engineering-insights/insights/static/insight-management.png new file mode 100644 index 00000000000..bb3bcd7bad5 Binary files /dev/null and b/docs/software-engineering-insights/insights/static/insight-management.png differ diff --git a/docs/software-engineering-insights/insights/static/insight-settings.png b/docs/software-engineering-insights/insights/static/insight-settings.png index 43a0740dcdc..84679c9a17f 100644 Binary files a/docs/software-engineering-insights/insights/static/insight-settings.png and b/docs/software-engineering-insights/insights/static/insight-settings.png differ diff --git a/docs/software-engineering-insights/insights/static/integration-monitoring.png b/docs/software-engineering-insights/insights/static/integration-monitoring.png new file mode 100644 index 00000000000..337a423d88e Binary files /dev/null and b/docs/software-engineering-insights/insights/static/integration-monitoring.png differ diff --git a/docs/software-engineering-insights/insights/static/manage-insight.png b/docs/software-engineering-insights/insights/static/manage-insight.png index 71802c2e1f1..db4d07cd24b 100644 Binary files a/docs/software-engineering-insights/insights/static/manage-insight.png and b/docs/software-engineering-insights/insights/static/manage-insight.png differ diff --git a/docs/software-engineering-insights/insights/static/sei-insights.png 
b/docs/software-engineering-insights/insights/static/sei-insights.png
new file mode 100644
index 00000000000..21abc5c6d4e
Binary files /dev/null and b/docs/software-engineering-insights/insights/static/sei-insights.png differ

diff --git a/docs/software-engineering-insights/insights/static/switch-projects.png b/docs/software-engineering-insights/insights/static/switch-projects.png
new file mode 100644
index 00000000000..97c3a60872f
Binary files /dev/null and b/docs/software-engineering-insights/insights/static/switch-projects.png differ

diff --git a/docs/software-engineering-insights/sei-metrics-and-reports/dora-metrics/dora-onboarding.md b/docs/software-engineering-insights/sei-metrics-and-reports/dora-metrics/dora-onboarding.md
index 0bc347cddb6..078829467f2 100644
--- a/docs/software-engineering-insights/sei-metrics-and-reports/dora-metrics/dora-onboarding.md
+++ b/docs/software-engineering-insights/sei-metrics-and-reports/dora-metrics/dora-onboarding.md
@@ -22,7 +22,7 @@ Furthermore, SEI gives you the flexibility to choose the [integrations](/docs/ca
| Engineering Team Use Cases for Measuring DORA Metrics | Learn how engineering teams can utilize DORA metrics | [Click here](#engineering-team-use-cases-for-measuring-dora-metrics) |
| DORA under-the-hood | Learn how Harness SEI correlates data across different tools and services | [Click here](#dora-under-the-hood) |
| Create DORA Profile | Learn how you can create a workflow profile to measure the DORA Metrics | [Click here](/docs/software-engineering-insights/sei-profiles/workflow-profiles/workflow-profile-overview) |
-| Create DORA Insight | Step by Step guide to create a DORA Insight | [Click here](/docs/software-engineering-insights/insights/dora-insight) |
+| Create DORA Insight | Step by Step guide to create a DORA Insight | [Click here](/docs/software-engineering-insights/insights/insight-tutorials/dora-insight) |
| DORA Metrics Calculation | How are the different DORA metrics calculated on Harness SEI | [Click here](/docs/category/dora-calculations) |
| Best Practices & Recommendations for measuring DORA Metrics | Recommendations to improve your DORA Metrics reporting | [Click here](#best-practices--recommendations) |
| DORA Roadmap | List of enhancements and improvements for DORA Metrics | [Click here](#roadmap) |
@@ -189,7 +189,7 @@ DORA Reports are available for configuration only if you already have an associa
Review the following resources to learn more about defining and managing DORA Profiles and DORA Insight on Harness SEI:
* [Configure and Manage a DORA Profile](/docs/software-engineering-insights/sei-profiles/workflow-profiles/dora-profile)
-* [Create and Manage a DORA Insight](/docs/software-engineering-insights/insights/dora-insight)
+* [Create and Manage a DORA Insight](/docs/software-engineering-insights/insights/insight-tutorials/dora-insight)
## Best Practices & Recommendations

diff --git a/docs/software-engineering-insights/sei-metrics-and-reports/planning/sprint-metrics/sei-sprints-metrics-overview.md b/docs/software-engineering-insights/sei-metrics-and-reports/planning/sprint-metrics/sei-sprints-metrics-overview.md
index 4c0e4d25b4b..dc963bfc969 100644
--- a/docs/software-engineering-insights/sei-metrics-and-reports/planning/sprint-metrics/sei-sprints-metrics-overview.md
+++ b/docs/software-engineering-insights/sei-metrics-and-reports/planning/sprint-metrics/sei-sprints-metrics-overview.md
@@ -43,7 +43,7 @@ Sprint metrics can help you plan and deliver on sprints more effectively, includ
Create a Sprints Insight
Step by step guide to create a Sprint Insight
- [Click Here](/docs/software-engineering-insights/insights/sprint-metrics-insight)
+ [Click Here](/docs/software-engineering-insights/insights/insight-tutorials/agile-insights)
Best Practices and Recommendations
@@ -147,7 +147,7 @@ Find the complete list of [Sprint Metrics ratios here](/docs/software-engineerin
Having a Sprint Insight can help address issues related to sprint success and enhance the overall sprint planning and delivery process. Refer to the tutorial below to learn how to create an Insight (i.e., a dashboard) configured with the most frequently used Sprint metrics reports to measure your team's contribution and activity across various sprint cycles.
-* [**Tutorial: Create a Sprint Metrics Insight**](/docs/software-engineering-insights/insights/sprint-metrics-insight)
+* [**Tutorial: Create a Sprint Metrics Insight**](/docs/software-engineering-insights/insights/insight-tutorials/agile-insights)
## Best Practices and Recommendations

diff --git a/docs/software-engineering-insights/sei-metrics-and-reports/planning/sprint-metrics/sei-sprints-metrics.md b/docs/software-engineering-insights/sei-metrics-and-reports/planning/sprint-metrics/sei-sprints-metrics.md
index 30c733060a3..67fd0d16142 100644
--- a/docs/software-engineering-insights/sei-metrics-and-reports/planning/sprint-metrics/sei-sprints-metrics.md
+++ b/docs/software-engineering-insights/sei-metrics-and-reports/planning/sprint-metrics/sei-sprints-metrics.md
@@ -41,7 +41,7 @@ For example:
* An un-estimated task at the beginning of a sprint: Adds 0 commit points.
* A task estimated as 2 story points: Adds 2 commit points.
-* A task estimated as 2 points at the beginning of the sprint and later revised to 5 points during the sprint: Adds 2 commit points.
+* A task estimated as 2 points at the beginning of the sprint and later revised to 5 points during the sprint: Adds 5 delivered story creep points.
* A task estimated at 1 point is removed from the sprint while the sprint is in progress: Adds 1 commit point.
* A task estimated at 2 points is completed before the sprint start time and then added to the sprint: Adds 2 commit points and 2 [commit done points](#commit-done-points).
@@ -132,12 +132,13 @@ Sprint velocity is a crucial metric for teams and organizations. By quantifying
* Prevent overcommitments in sprint planning.
* Understand how teams should adapt and strategize in response to changing conditions.
-**Sprint velocity**, also called _delivered points_ or _velocity points_, is the sum of story points completed within a sprint.
+**Sprint velocity**, also called _delivered points_ or _velocity points_, is calculated as the sum of [commit done points](#commit-done-points), delivered scope creep points (i.e., [creep done points](#creep-done-points)), and delivered story creep points, based on the number of story points assigned to completed tickets when the sprint ends.
-Sprint velocity is calculated as the sum of [commit done points](#commit-done-points) and [creep done points](#creep-done-points), based on the number of story points assigned to completed tickets when the sprint ends.
+* **Delivered scope creep points:** This [metric](#creep-done-points) captures the total story points from all completed creep tickets within the sprint, giving you better visibility into scope changes.
+* **Delivered story creep points:** This represents the sum of story points from completed tickets where estimates were increased during the sprint.
```bash
-Sprint velocity = Commit done points + Creep done points
+Sprint velocity = Commit done points + Creep done points (i.e. Delivered scope creep points) + Delivered story creep points
```
For example, a sprint that closes 20 commit done points, 5 delivered scope creep points, and 3 delivered story creep points has a sprint velocity of 28.
The **Velocity Points STDEV** is the standard deviation of sprint velocity points. It represents the variability in a team's productivity over multiple sprints.

diff --git a/docs/software-engineering-insights/sei-metrics-and-reports/trellis-scores/sei-trellis-scores-overview.md b/docs/software-engineering-insights/sei-metrics-and-reports/trellis-scores/sei-trellis-scores-overview.md
index e275a3fcda0..fdc1be74462 100644
--- a/docs/software-engineering-insights/sei-metrics-and-reports/trellis-scores/sei-trellis-scores-overview.md
+++ b/docs/software-engineering-insights/sei-metrics-and-reports/trellis-scores/sei-trellis-scores-overview.md
@@ -34,7 +34,7 @@ sidebar_label: Overview
Trellis Insight
Step by step guide to create a Trellis Insight
- [Click Here](/docs/software-engineering-insights/insights/trellis-insight)
+ [Click Here](/docs/software-engineering-insights/insights/insight-tutorials/trellis-insight)
Trellis Calculations

diff --git a/docs/software-engineering-insights/sei-profiles/static/dora-5.png b/docs/software-engineering-insights/sei-profiles/static/dora-5.png
index fb013007e38..f3927591717 100644
Binary files a/docs/software-engineering-insights/sei-profiles/static/dora-5.png and b/docs/software-engineering-insights/sei-profiles/static/dora-5.png differ

diff --git a/docs/software-engineering-insights/sei-profiles/workflow-profiles/dora-profile.md b/docs/software-engineering-insights/sei-profiles/workflow-profiles/dora-profile.md
index e43b82c846a..05c33368cc5 100644
--- a/docs/software-engineering-insights/sei-profiles/workflow-profiles/dora-profile.md
+++ b/docs/software-engineering-insights/sei-profiles/workflow-profiles/dora-profile.md
@@ -46,6 +46,7 @@ Lead Time for Changes measures how long it takes for a task to move from develop
* **Select the Starting event:** Define when Lead Time tracking begins:
  * **Ticket Created:** Starts tracking when a ticket is created in the issue management system.
  * **Commit Created:** Starts tracking when the first commit is made.
+  * **Use First Stage (Recommended):** Starts tracking when the conditions of the first workflow stage are met.
  * **API Event:** Uses a custom API event to trigger Lead Time calculation.
* **Define Workflow Stages:**
  * Add stages that represent key phases in your delivery process, such as issue management and CI/CD activities.
@@ -62,7 +63,15 @@ This section covers how to set up the Lead Time for Changes metric definition us
![](../static/dora-4.png)
-* Select **Ticket Created** as the start event. This triggers Lead Time tracking whenever a new ticket is created.
+Choose the appropriate start event to define when Lead Time tracking begins in your workflow.
+
+* **Ticket Created**
+  * Tracking starts when a new ticket (e.g., story, task, or bug) is created in your issue management system.
+  * This option is ideal if you want to include the planning phase before development in the Lead Time calculation.
+
+* **First Stage**
+  * Tracking begins when the conditions specified in the first workflow stage are met.
+  * Use this option if you prefer to calculate Lead Time based on a specific status or phase in your delivery process.
![](../static/dora-5.png) @@ -208,8 +217,17 @@ This hybrid configuration involves correlating your **Issue Management system (e #### Define the workflow -Choose **Ticket Created** as the starting event. This begins Lead Time tracking when a new ticket (e.g., story, task, or bug) is created in your Issue Management system. -Why? This ensures that planning efforts before development are accounted for. +Choose the appropriate start event to define when Lead Time tracking begins in your workflow. + +* **Ticket Created** + * Tracking starts when a new ticket (e.g., story, task, or bug) is created in your issue management system. + * This option is ideal if you want to include the planning phase before development in the Lead Time calculation. + +* **First Stage** + * Tracking begins when the conditions specified in the first workflow stage are met. + * Use this option if you prefer to calculate Lead Time based on a specific status or phase in your delivery process. + +![](../static/dora-5.png) #### Add custom stages for tracking issue management statuses @@ -353,8 +371,17 @@ This configuration extends the hybrid approach of **Issue Management** and **SCM #### Define the workflow -Choose **Ticket Created** as the starting event. This begins Lead Time tracking when a new ticket (e.g., story, task, or bug) is created in your Issue Management system. -Why? This ensures that planning efforts before development are accounted for. +Choose the appropriate start event to define when Lead Time tracking begins in your workflow. + +* **Ticket Created** + * Tracking starts when a new ticket (e.g., story, task, or bug) is created in your issue management system. + * This option is ideal if you want to include the planning phase before development in the Lead Time calculation. + +* **First Stage** + * Tracking begins when the conditions specified in the first workflow stage are met. + * Use this option if you prefer to calculate Lead Time based on a specific status or phase in your delivery process. + +![](../static/dora-5.png) #### Add custom stages for tracking issue management statuses @@ -1716,4 +1743,4 @@ While configuring the DORA profile, you may encounter some common issues. Below After setting up the DORA profile, proceed to create the DORA Insight using the available DORA widgets. These widgets enable you to visualize and monitor key DORA metrics, providing actionable insights into your team’s performance. 
* [Configure & add the DORA widgets](/docs/software-engineering-insights/sei-metrics-and-reports/dora-metrics/) -* [Create & manage the DORA Insight](/docs/software-engineering-insights/insights/dora-insight) \ No newline at end of file +* [Create & manage the DORA Insight](/docs/software-engineering-insights/insights/insight-tutorials/dora-insight) \ No newline at end of file diff --git a/docs/software-engineering-insights/sei-profiles/workflow-profiles/styles.module.css b/docs/software-engineering-insights/sei-profiles/workflow-profiles/styles.module.css deleted file mode 100644 index 3aff9d2ba9f..00000000000 --- a/docs/software-engineering-insights/sei-profiles/workflow-profiles/styles.module.css +++ /dev/null @@ -1,20 +0,0 @@ -.red { - color: red; -} -.red[aria-selected='true'] { - border-bottom-color: red; -} - -.orange { - color: orange; -} -.orange[aria-selected='true'] { - border-bottom-color: orange; -} - -.yellow { - color: yellow; -} -.yellow[aria-selected='true'] { - border-bottom-color: yellow; -} \ No newline at end of file diff --git a/docs/software-supply-chain-assurance/cicd-security-posture-management-cicdspm.md b/docs/software-supply-chain-assurance/cicd-security-posture-management-cicdspm.md index 2097d64a9d2..384047d8fcb 100644 --- a/docs/software-supply-chain-assurance/cicd-security-posture-management-cicdspm.md +++ b/docs/software-supply-chain-assurance/cicd-security-posture-management-cicdspm.md @@ -52,7 +52,7 @@ By clicking on a specific evaluation status, you can access detailed information ## Plugins Tab​ -The Plugin tab provides a list of all plugins used in the pipeline. The tab name may vary based on the integration type. For instance, with GitHub, it will be labeled Actions. +The Plugin tab provides a list of all plugins used in the pipeline. The tab name may vary based on the integration. For instance, with GitHub, it will be labeled Actions. diff --git a/docs/software-supply-chain-assurance/integrations-and-permissions.md b/docs/software-supply-chain-assurance/integrations-and-permissions.md index 7d5d0e35834..3ee651c375c 100644 --- a/docs/software-supply-chain-assurance/integrations-and-permissions.md +++ b/docs/software-supply-chain-assurance/integrations-and-permissions.md @@ -4,10 +4,10 @@ description: Details of the integrations and their permissions required sidebar_position: 89 --- -Harness requires gathering data from all relevant entities to perform security scanning and apply standards across multiple software supply chain entities. The SCS module facilitates this by using Harness-built integration apps that connect with these entities and fetch the necessary data. These entities include code repositories, artifacts, and CI/CD toolchains. +Harness requires gathering data from all relevant integrations to perform security scanning and apply standards across multiple software supply chain targets. The SCS module facilitates this by using Harness-built integration apps that connect with these target types and fetch the necessary data. These target types include code repositories, artifacts, and CI/CD toolchains. :::note -Currently, Harness supports only Code Repositories, and specifically GitHub. In the near future, Harness will add support for other entities. +Currently, Harness supports only Code Repositories, and specifically GitHub. In the near future, Harness will add support for other integrations. 
:::

diff --git a/docs/software-supply-chain-assurance/manage-risk-and-compliance/manage-compliance-posture.md b/docs/software-supply-chain-assurance/manage-risk-and-compliance/manage-compliance-posture.md
index a2cab60ac68..110cd8780d7 100644
--- a/docs/software-supply-chain-assurance/manage-risk-and-compliance/manage-compliance-posture.md
+++ b/docs/software-supply-chain-assurance/manage-risk-and-compliance/manage-compliance-posture.md
@@ -5,23 +5,23 @@ description: Assess and understand the risk posture of your software supply chai
sidebar_position: 1
---
-The Compliance section within the SCS module serves as a hub for assessing and understanding the risk posture of your entire supply chain. This section is indispensable for GRC (Governance, Risk, and Compliance) and security teams as it provides detailed evaluation results after applying all relevant rules to various entity types within your supply chain. You can access a thorough summary of these evaluations, including the specific rules applied, their execution statuses, and the entities impacted by each rule.
+The Compliance section within the SCS module serves as a hub for assessing and understanding the risk posture of your entire supply chain. This section is indispensable for GRC (Governance, Risk, and Compliance) and security teams as it provides detailed evaluation results after applying all relevant rules to various target types within your supply chain. You can access a thorough summary of these evaluations, including the specific rules applied, their execution statuses, and the target types impacted by each rule.
- +
-To learn more about the supported standards and the rules applied to entities, please refer to the documentation on [Standards and Rule Definitions](./manage-compliance-posture).
+To learn more about the supported standards and the rules applied to targets, please refer to the documentation on [Standards and Rule Definitions](./standards-and-rule-definitions).
:::note
-Currently Harness supports Code Repositories, and CI/CD. Please refer to the [Use SCS](../get-started/onboarding-guide#use-scs) section in the onboarding document to see the full list of supported integrations for each feature. In the near future, Harness will add support for other entities and integrations.
+Currently, Harness supports Code Repositories and CI/CD. Please refer to the [Use SCS](../get-started/onboarding-guide#use-scs) section in the onboarding document to see the full list of supported integrations for each feature. In the near future, Harness will add support for other target types and integrations.
:::

## Compliance Overview

-The Summery tab provides comprehensive details of evaluations conducted across all entity types of the software supply chain. This tab offers details about:
+The Summary tab provides comprehensive details of evaluations conducted across all target types of the software supply chain. This tab offers details about:
- +
* **Evaluation Breakdown**: A summary of rules passing versus failing.
* **Failure by Severity**: Displays the number of failures categorized by severity levels: critical, high, medium, and low.
@@ -34,22 +34,22 @@ Additionally, users can apply filters based on standards to view evaluations and
## View Rule Evaluations
-The Rules tab in the “Compliance” section provides a detailed view of all the rules and their complete execution details applied across all the entities configured.
For each rule, along with its name and description, the view provides the following information:
+The Evaluations tab in the “Compliance” section provides a detailed view of all the rules and their complete execution details applied across all the configured targets. For each rule, along with its name and description, the view provides the following information:
- +
-* **Evaluations**: Displays the total number of evaluations occurred, indicating whether they passed or failed, and the total number of entity types to which the rule is applied (e.g., code repositories, artifacts, CI/CD tools).
+* **Evaluations**: Displays the total number of evaluations that occurred, indicating whether they passed or failed, and the total number of targets to which the rule is applied (e.g., code repositories, artifacts, CI/CD tools).
* **Severity**: Presents the severity of each rule, categorized as critical, high, medium, or low.
* **Standard:** Indicates the standard to which the rule belongs and the rule's ID according to the official ID convention.
Users can view rules filtered by standards and apply an additional filter specific to severity. Also, users can perform searches within the filtered results.
The history of the data can be viewed for the last 24 hours, 7 days, and 30 days.
-## View Impacted Entities
+## View Impacted Targets
-Upon clicking a rule in the Rules tab, you will see a list of all the entities impacted by the evaluation. This page provides information about the rule, its description, evaluation history and general remediation steps to address any failures.
+Upon clicking a rule in the Evaluations tab, you will see a list of all the targets impacted by the evaluation. This page provides information about the rule, its description, evaluation history, and general remediation steps to address any failures.
- +
Clicking on an item will bring up the latest evaluation details. Alternatively, you can click on an icon from the evaluation history to view specific evaluation details. The information includes the time of evaluation, the reason for failure (if failed), and the remediation information to address the issues.
To learn more about managing the compliance status, refer to the document [Manage Compliance Posture](./manage-compliance-posture).

-
+

-The page offers details about the rule, including its description, severity (defined by Harness), the standard with the rule ID to which it belongs, and the entity type to which it applies (e.g., code repository, artifact, CI/CD).
+The page offers details about the rule, including its description, severity (defined by Harness), the standard with the rule ID to which it belongs, and the target type to which it applies (e.g., code repository, artifact, CI/CD).

 You can apply filters specific to standards to view the rules associated with those standards and use the search function to find specific rules.

@@ -29,7 +29,7 @@ Harness supports the following standards.

 ### CIS Benchmarks

-The following CIS v1.0 rules are supported by Harness for the evaluations, and Harness will continue to add more rules across different entity types. For more detailed information, refer to the official [CIS documentation](https://www.cisecurity.org/benchmark/software-supply-chain-security)
+The following CIS v1.0 rules are supported by Harness for the evaluations, and Harness will continue to add more rules across different target types. For more detailed information, refer to the official [CIS documentation](https://www.cisecurity.org/benchmark/software-supply-chain-security).
CIS Benchmarks @@ -94,7 +94,7 @@ The following CIS v1.0 rules are supported by Harness for the evaluations, and H
### OWASP Top 10 CI/CD Security Risks -The following rules are supported by Harness to perform evaluations, and Harness will continue to add more rules across different entity types. For more detailed information, refer to the official [OWASP documentation](https://owasp.org/www-project-top-10-ci-cd-security-risks/). +The following rules are supported by Harness to perform evaluations, and Harness will continue to add more rules across different target types. For more detailed information, refer to the official [OWASP documentation](https://owasp.org/www-project-top-10-ci-cd-security-risks/).
OWASP Top 10 CI/CD Security Risks diff --git a/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-evaluations-tab.png b/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-evaluations-tab.png new file mode 100644 index 00000000000..5fc86e0803b Binary files /dev/null and b/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-evaluations-tab.png differ diff --git a/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-infographic.png b/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-infographic.png new file mode 100644 index 00000000000..2f7cc7fde65 Binary files /dev/null and b/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-infographic.png differ diff --git a/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-rule-definitions.png b/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-rule-definitions.png new file mode 100644 index 00000000000..d4502fe8f83 Binary files /dev/null and b/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-rule-definitions.png differ diff --git a/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-summary.png b/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-summary.png new file mode 100644 index 00000000000..b6cd7709ce8 Binary files /dev/null and b/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-summary.png differ diff --git a/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-target.png b/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-target.png new file mode 100644 index 00000000000..7d0b52451d3 Binary files /dev/null and b/docs/software-supply-chain-assurance/manage-risk-and-compliance/static/compliance-target.png differ diff --git a/docs/software-supply-chain-assurance/repository-security-posture-management-rspm.md b/docs/software-supply-chain-assurance/repository-security-posture-management-rspm.md index ad37c202ffb..e40789890b7 100644 --- a/docs/software-supply-chain-assurance/repository-security-posture-management-rspm.md +++ b/docs/software-supply-chain-assurance/repository-security-posture-management-rspm.md @@ -36,7 +36,7 @@ To onboard your code repositories, refer to the [Get Started](./get-started) gui ## Supported code repositories -The RSPM feature in the Harness SCS module is currently supported only for **GitHub**. Harness plans to add support for other code repositories in the near future. To learn more about all supported entities and their required permissions, please refer to the [Integrations and Permissions](./integrations-and-permissions) +The RSPM feature in the Harness SCS module is currently supported only for **GitHub**. Harness plans to add support for other code repositories in the near future. 
To learn more about all supported integrations and their required permissions, please refer to the [Integrations and Permissions](./integrations-and-permissions).

 ## Overview Tab
diff --git a/docs/software-supply-chain-assurance/ssca-access-control.md b/docs/software-supply-chain-assurance/ssca-access-control.md
index 4dcc9a78b36..43c1cd026f4 100644
--- a/docs/software-supply-chain-assurance/ssca-access-control.md
+++ b/docs/software-supply-chain-assurance/ssca-access-control.md
@@ -11,7 +11,7 @@ Harness's RBAC system enables you to precisely manage the user access to specifi

 ## RBAC for Remediation Tracker

-The configuration of RBAC for the Remediation Tracker is possible at three levels: Account, Organization, and Project.
+The configuration of RBAC for the Remediation Tracker is possible at three levels: Account, [Organization](/docs/software-supply-chain-assurance/ssca-access-control#organization), and [Project](/docs/software-supply-chain-assurance/ssca-access-control#project).

 ### Creating and Managing Roles

@@ -20,14 +20,20 @@ Here's a guide to creating a role or managing permissions for the Remediation Tr

 1. Navigate to **Account Settings** > **Access Control** > **Roles** within your Harness Account.

+
+
+
+
 2. Add a new role or select an existing one to modify.

-3. Within the role, select Supply Chain Security. This action will display the SCS Permissions.
+
+

-![SCS RBAC](./static/ssca-rbac.png "SCS RBAC")
+3. Within the role, select Supply Chain Assurance. This action will display the SCS Permissions.
+

 The Remediation Tracker is governed by the following permissions:

@@ -36,9 +42,13 @@ The Remediation Tracker is governed by the following permissions:
 * **Create/Edit**: Enables users to create new trackers and edit existing ones.
 * **Close:** Allows users to close any trackers.

-For the Organization level, open the same account settings and proceed to **Organizations**. Choose your organization and under **Organization Level Access Control and Audit Trail**, select **Access Control**. Here, configure the roles and permissions at the organization level in a manner similar to the account level process.
-To set roles and permissions at the Project level, navigate to the **Project** section from the module navigation bar, and select **Access Control**. Follow similar steps as above to establish the roles and permissions for the project level.
+#### **Organization**:
+
+To configure roles and permissions at the organization level, open the Organization Settings. From the module navigation bar, select your desired organization, then choose **Access Control**. Here, you can configure roles and permissions following a process similar to the one used at the account level.
+
+#### **Project**:
+To set roles and permissions at the Project level, open the Project Settings, and select **Access Control**. Follow similar steps as above to establish the roles and permissions for the project level.

 ### Creating and Managing Resource Groups

@@ -49,6 +59,7 @@ Here's how you can create and manage resource groups for the remediation tracker
 3. Set the "Resource Scope" accordingly if you are creating one.
 4. Within the Resources, select Supply Chain Assurance.

-![Resource groups](./static/resource-groups-remediation-tracker.png "Resource groups")
+
+
 For configuring at both the organization and project levels, the navigation process is similar to what was detailed in the previous section.
\ No newline at end of file
diff --git a/docs/software-supply-chain-assurance/static/access-control-permissions.png b/docs/software-supply-chain-assurance/static/access-control-permissions.png
new file mode 100644
index 00000000000..5ea9a804131
Binary files /dev/null and b/docs/software-supply-chain-assurance/static/access-control-permissions.png differ
diff --git a/docs/software-supply-chain-assurance/static/access-control-rbac.png b/docs/software-supply-chain-assurance/static/access-control-rbac.png
new file mode 100644
index 00000000000..91d7b1a2296
Binary files /dev/null and b/docs/software-supply-chain-assurance/static/access-control-rbac.png differ
diff --git a/docs/software-supply-chain-assurance/static/access-control-remediation.png b/docs/software-supply-chain-assurance/static/access-control-remediation.png
new file mode 100644
index 00000000000..95628c3cd31
Binary files /dev/null and b/docs/software-supply-chain-assurance/static/access-control-remediation.png differ
diff --git a/docs/software-supply-chain-assurance/static/sca-access-control.png b/docs/software-supply-chain-assurance/static/sca-access-control.png
new file mode 100644
index 00000000000..563ef654824
Binary files /dev/null and b/docs/software-supply-chain-assurance/static/sca-access-control.png differ
diff --git a/docusaurus.config.js b/docusaurus.config.js
index 98bd5d60779..52e049e03cc 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -616,9 +616,9 @@ async function config() {
       path.join(__dirname, '/plugins/focusOnAnchor-plugin'),
     ],
     clientModules: [
-      path.join(__dirname, '/client_module/searchBar'),
-      path.join(__dirname, '/client_module/iframeEmbed'),
-      path.join(__dirname, '/client_module/chatbot'),
+      path.join(__dirname, '/client-modules/searchBar'),
+      path.join(__dirname, '/client-modules/iframeEmbed'),
+      path.join(__dirname, '/client-modules/chatbot'),
     ],
     stylesheets: [
       {
diff --git a/kb/chaos-engineering/chaos-engineering-faq.md b/kb/chaos-engineering/chaos-engineering-faq.md
index cd81d5a4cd5..7c6355db6cc 100644
--- a/kb/chaos-engineering/chaos-engineering-faq.md
+++ b/kb/chaos-engineering/chaos-engineering-faq.md
@@ -215,6 +215,11 @@ No, the existing infrastructures will continue to function as usual, but HCE rec
 #### Is there a way to upgrade the older experiment to the new format?
 - Yes, you can manually edit the experiment manifest or create a new experiment from the UI. Older experiments will continue to work because of backward compatibility.

+## Application Maps
+
+#### How do I manually associate experiments with an application map?
+To manually associate an experiment with an [application map](/docs/chaos-engineering/use-harness-ce/application-map), specify the tag `applicationmap=` when creating the experiment.
+
 ## Security

 #### What are the identity providers supported by Harness Chaos for user authentication?
diff --git a/kb/reference-architectures/iacm/iacm-best-practices.md b/kb/reference-architectures/iacm/iacm-best-practices.md
index 2d9999fbd21..5a211303a5a 100644
--- a/kb/reference-architectures/iacm/iacm-best-practices.md
+++ b/kb/reference-architectures/iacm/iacm-best-practices.md
@@ -49,7 +49,7 @@ For general use cases to reduce unnecessary complexity and to optimize performan
 Some reusable options can be to:

 - Create reusable pipelines and set them as default pipelines to trigger quickly from any workspace within a project.
- Use [pipeline variables](https://developer.harness.io/docs/infra-as-code-management/project-setup/input-variables) to ensure consistency. -- Use built-in plugins such as [drift detection](https://developer.harness.io/docs/infra-as-code-management/pipelines/operations/drift-detection), [PR automation](https://developer.harness.io/docs/infra-as-code-management/use-iacm/pr-automation) and [IaCM Approval steps](https://developer.harness.io/docs/infra-as-code-management/use-iacm/approval-step). +- Use built-in plugins such as [drift detection](https://developer.harness.io/docs/infra-as-code-management/use-iacm/drift-detection), [PR automation](https://developer.harness.io/docs/infra-as-code-management/use-iacm/pr-automation) and [IaCM Approval steps](https://developer.harness.io/docs/infra-as-code-management/use-iacm/approval-step). - Utilize [built-in OPA policies](https://developer.harness.io/docs/infra-as-code-management/policies/terraform-plan-cost-policy) to add protection and ensure your pipelines warn or fail if certain conditions are not met, e.g. if your total monthly infrastructure costs exceed a specified amount. diff --git a/netlify/functions/chatbot_proxy.mts b/netlify/functions/chatbot_proxy.mts index 150b85e763f..b05042fa7cd 100644 --- a/netlify/functions/chatbot_proxy.mts +++ b/netlify/functions/chatbot_proxy.mts @@ -55,7 +55,7 @@ export default async (req: Request, context: Context) => { } const responseData = await response.json(); - console.log(responseData); + // console.log(responseData); const uuid = await getUUID(body.account_id, token); if (!uuid) { diff --git a/package.json b/package.json index 2d49a297d90..af4be122560 100644 --- a/package.json +++ b/package.json @@ -19,6 +19,7 @@ "test": "npm run lint:ci" }, "dependencies": { + "@coveo/headless": "2.80.5", "@docusaurus/core": "^3.6.2", "@docusaurus/cssnano-preset": "^3.6.2", "@docusaurus/faster": "^3.6.2", diff --git a/release-notes/chaos-engineering.md b/release-notes/chaos-engineering.md index a48a7832e09..27003758a8f 100644 --- a/release-notes/chaos-engineering.md +++ b/release-notes/chaos-engineering.md @@ -1,7 +1,7 @@ --- title: Chaos Engineering release notes sidebar_label: Chaos Engineering -date: 2024-12-19T10:00 +date: 2025-01-16T10:00 sidebar_position: 5 --- @@ -20,25 +20,38 @@ The release notes describe recent changes to Harness Chaos Engineering. ::: -## December 2024 +## January 2025 -### Version 1.50.3 +### Version 1.52.3 -#### New features and enhancements +- **New videos**: [Pod Network Rate Limit](https://youtu.be/01efVOyFGl8?si=FQKWhVgdUJ0889fj), [Pod API Modify Header](https://youtu.be/sIkUxtnQY_o?si=ApWs_Opx2x27SkLj), [Pod IO Attribute Override](https://youtu.be/chk5K754J_4?si=pmzAgnpmHJC0f3Oz), [Pod API Block](https://youtu.be/Cg5gbfFrJQs?si=KueFmRJ6k8Ji4kbS), [Pod API Modify Body](https://youtu.be/Dbr_KwfTxps?si=-aHOmAr5onrFq6Zy). Adds a [playlist](https://www.youtube.com/playlist?list=PLXsYHFsLmqf0fgHoZANmwGB1tSQka5kDV) with Kubernetes Pod Chaos experiments. + +#### Fixed issues + +- Fixed the issue where fault templates were not working without enabling the feature flag associated with it. (CHAOS-7210) + +- Fixed the issue where automatic experiment creation was unable to create the required number of experiments in **Advanced/Maximum** mode. (CHAOS-7214) + +## Previous releases + +
+2024 releases + +#### December 2024, Version 1.50.3 + +##### New features and enhancements - Adds support for [configuring image registries at multiple scopes](/docs/chaos-engineering/use-harness-ce/image-registry#why-use-a-custom-image-registry), such as Project, Account, Organization, and Infrastructure levels. These settings can be automatically inherited by lower levels, but if the "override allowed" option is enabled at the parent level, lower levels can modify or override these configurations. It is behind the feature flag `CHAOS_IMAGEREGISTRY_DEV`. (CHAOS-6570) -#### Fixed issues +##### Fixed issues - Fixed an issue where ChaosGuard was not evaluating correctly after adding support for environments. (CHAOS-7075) - Fixed an issue where the Pod API modify header fault failed to function as expected when the header value was set to '/'. (CHAOS-7063) -## November 2024 +#### November 2024, Version 1.49.1 -### Version 1.49.1 - -#### New features and enhancements +##### New features and enhancements - Extends ChaosGuard conditions for node-level chaos experiments. (CHAOS-6788) @@ -53,21 +66,21 @@ The release notes describe recent changes to Harness Chaos Engineering. - Adds local and UTC times in the cron schedule next run. (CHAOS-6974) -#### Fixed issues +##### Fixed issues - Fixed the cron experiment execution that was not working with Linux and Windows infrastructure (CHAOS-7044) - Fixed the issue of **Visual** and **YAML** tabs overlapping while trying to toggle between them in the **Condition Editor** in ChaosGuard. (CHAOS-7026) -### Version 1.48.0 +#### November 2024, Version 1.48.0 -#### New features and enhancements +##### New features and enhancements - Adds a pre-check to the Windows global blackhole experiment to verify if the firewall is enabled for the target Windows VM. If not, the `ENABLE_FIREWALL` tunable is set, which, by default, enables the firewall. (CHAOS-6848) - Introduces the Windows disk fill chaos experiment, supported by Windows chaos infrastructure. (CHAOS-6842) -#### Fixed issues +##### Fixed issues - Fixed the input mechanism for specifying multiple zones for CLB AZ down chaos fault, now allowing comma-separated values for multiple inputs. (CHAOS-6909) @@ -83,11 +96,9 @@ The release notes describe recent changes to Harness Chaos Engineering. - Fixed the issue where two continuous command probes with a short polling duration could not execute in Linux infrastructure. (CHAOS-5461) -## October 2024 +#### October 2024, Version 1.47.0 -### Version 1.47.0 - -#### New features and enhancements +##### New features and enhancements - Adds support to explicitly define the log watcher sidecar for chaos experiment manifest that use Harness Delegate. (CHAOS-6703) @@ -101,7 +112,7 @@ The release notes describe recent changes to Harness Chaos Engineering. - Adds the functionality to block all inbound rules for Windows global blackhole chaos. (CHAOS-6603) -#### Fixed issues +##### Fixed issues - Fixed an issue where the list of infrastructure supported by Harness Delegate showed deleted infrastructure. (CHAOS-6742) @@ -111,11 +122,9 @@ The release notes describe recent changes to Harness Chaos Engineering. - Fixed the issue where the experiment schedule type was not being updated when it was changed from non-cron to cron type. (CHAOS-6822) -## September 2024 - -### Version 1.45.5 +#### September 2024, Version 1.45.5 -#### Fixed issues +##### Fixed issues - Fixed the issue where chaos infrastructure created with the help of a sandbox showed "Supported by a Harness Delegate". 
(CHAOS-6501) @@ -123,23 +132,19 @@ The release notes describe recent changes to Harness Chaos Engineering. - Fixed the issue of discrepancy between the number of probes in the UI and backend. (CHAOS-6528) -## August 2024 - -### Version 1.44.3 +#### August 2024, Version 1.44.3 -#### New features and enhancements +##### New features and enhancements - Enables the global blackhole chaos to block inbound traffic. (CHAOS-6381) -#### Fixed issues +##### Fixed issues - CPU utilization increased due to continuously executing clean up tasks. This issue has been fixed by adding a sleep operation that runs after every "remove" operation and optimizes overall CPU performance. (CHAOS-5709) -## July 2024 - -### Version 1.43.3 +#### July 2024, Version 1.43.3 -#### New features and enhancements +##### New features and enhancements - Crictl binary is upgraded from 1.29.0 to 1.31.0 to fix 3 vulnerabilities. (CHAOS-6357) @@ -149,7 +154,7 @@ The release notes describe recent changes to Harness Chaos Engineering. - Adds **Probe Properties** tab on the UI in ChaosHub to show details about the probe selected. (CHAOS-6132) -#### Fixed issues +##### Fixed issues - Fixed issue where GameDay was not available to users at the project level but was available at the account/organization level who had administrator access. (CHAOS-6349) @@ -165,15 +170,15 @@ The release notes describe recent changes to Harness Chaos Engineering. - Fixed an incorrect syntax in the `kubectl watch` command in the UI. (CHAOS-5968) -### Version 1.41.1 +#### July 2024, Version 1.41.1 -#### Fixed issues +##### Fixed issues - Fixed the error associated with upgrading a chaos infrastructure by providing relevant permissions for the upgrade agent in the execution plane (user host/cluster). (CHAOS-5980) -### Version 1.40.1 +#### July 2024, Version 1.40.1 -#### New features and enhancements +##### New features and enhancements - Adds a new Kubernetes pod fault, [pod IO mistake](/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-mistake) that causes files to read or write an incorrect value. (CHAOS-5916) @@ -187,13 +192,15 @@ The release notes describe recent changes to Harness Chaos Engineering. - Converts the default health check probes to `type:inline` from `type:source` for Kubernetes infrastructure to improve the execution speed of chaos experiments. (CHAOS-4348) -#### Fixed issues +##### Fixed issues + +- CPU utilization increased due to continuously executing clean up tasks. This issue has been fixed by adding a sleep operation that runs after every remove operation and optimizes overall CPU performance. (CHAOS-5709) - Fixed an issue where an experiment in the `Error` state would not finish, and be in a state of infinite run timestamp. (CHAOS-5577) -### Version 1.39.11 +#### July 2024, Version 1.39.11 -#### Fixed issues +##### Fixed issues - Fixed an issue wherein trying to add a pre-defined experiment in Windows infrastructure was unsuccessful. (CHAOS-5863) @@ -201,11 +208,9 @@ The release notes describe recent changes to Harness Chaos Engineering. - Fixed an issue where the **Linux restart** chaos fault could not parse string values. (CHAOS-5616) -## May 2024 +#### May 2024, Version 1.38.7 -### Version 1.38.7 - -#### New features and enhancements +##### New features and enhancements - This release provides support to install chaos infrastructure using Delegates, and this is known as DDCI (Delegate-Driven Chaos Infrastructure). 
(CHAOS-2017) @@ -238,7 +243,7 @@ The release notes describe recent changes to Harness Chaos Engineering. - Adding all the overrides to the chaos engine. - Enhancing the list filter, compatible only with the new experiment template. (CHAOS-5122) -#### Fixed issues +##### Fixed issues - Fixed an issue where the compatibility check was enabled for other infrastructure types too. The overview form now preserves the state while switching between different infrastructures. (CHAOS-5614) @@ -252,19 +257,19 @@ The release notes describe recent changes to Harness Chaos Engineering. - Fixed an issue where the [pod IO stress](/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-io-stress) experiment incorrectly applied stress on the helper pod instead of the target container. (CHAOS-5416) -### Version 1.37.0 +#### May 2024, Version 1.37.0 -#### New features and enhancements +##### New features and enhancements - This release introduces the DynamoDB replication pause experiments powered by AWS FIS. These experiments improve the configuration, execution, and monitoring capabilities of the application. (CHAOS-5002) -#### Fixed issues +##### Fixed issues - Fixed an issue where the command probe multiple source probes were overridden. (CHAOS-5308) -### Version 1.36.5 +#### May 2024, Version 1.36.5 -#### Fixed issues +##### Fixed issues - Fixed an issue where accounts that started with an underscore could not execute a Linux chaos experiment. (CHAOS-5185) @@ -274,15 +279,13 @@ The release notes describe recent changes to Harness Chaos Engineering. - Fixed an issue in the UI where chaos experiments with the toggle option to enable (or disable) cloud secret was enabled automatically after saving the experiment. (CHAOS-4987) -## April 2024 - -### Version 1.35.1 +#### April 2024, Version 1.35.1 -#### New features and enhancements +##### New features and enhancements * The node drain chaos experiment now supports selecting multiple target nodes in sequence(serial or parallel). (CHAOS-2187) -#### Fixed issues +##### Fixed issues * Linux command probes in "source" mode was failing due to a module mismatch. This is fixed now. (CHAOS-4952) @@ -292,9 +295,9 @@ The release notes describe recent changes to Harness Chaos Engineering. * If syntax errors were identified in a manifest after uploading it, user had to refresh the page and re-upload the YAML. This is fixed now, and users can edit the YAML without refreshing it. (CHAOS-4905) -### Version 1.34.5 +#### April 2024, Version 1.34.5 -#### New features and enhancements +##### New features and enhancements * Adds 32-bit Windows support for Windows chaos infrastructure. (CHAOS-4792) @@ -310,7 +313,7 @@ The release notes describe recent changes to Harness Chaos Engineering. * Updates ensure smooth operation of the pod API chaos and pod HTTP chaos faults in case the target pod restarts. (CHAOS-4187) -#### Fixed issues +##### Fixed issues * Resilience probes were not available for Windows experiments. This is fixed. (CHAOS-4786) @@ -322,17 +325,15 @@ The release notes describe recent changes to Harness Chaos Engineering. * When you provide a source port for the Linux network loss experiment, all the ports on the VM were targeted. This is fixed. 
(CHAOS-4591) -## March 2024 - -### Version 1.33.1 +#### March 2024, Version 1.33.1 -#### New features and enhancements +##### New features and enhancements * The Windows blackhole chaos experiment supports graceful abort functionality, thereby providing better control and flexibility while performing the experiment. (CHAOS-4582) -### Version 1.32.1 +#### March 2024, Version 1.32.1 -#### New features and enhancements +##### New features and enhancements * Adds `listInfrasWithExperimentStats` API to fetch the experiment statistics for the requested chaos infrastructure. The API takes a list of infrastructure IDs (infraIDs) and returns the associated experiment and experiment run count. The `listInfras` API is deprecated. (CHAOS-4417) @@ -345,27 +346,23 @@ The release notes describe recent changes to Harness Chaos Engineering. * Adds Helm support to install chaos infrastructure. (CHAOS-3327) -#### Fixed issues +##### Fixed issues * When a chaos experiment was cloned and the probe configuration of the cloned experiment was modified, the changes to the probe configuration were not reflected in the experiment. This issue is resolved. (CHAOS-4249) -## February 2024 - -### Version 1.31.2 +#### February 2024, Version 1.31.2 -#### New features and enhancements +##### New features and enhancements * This release adds API support to install and upgrade chaos infrastructure using Helm. (CHAOS-2998) -#### Fixed issues +##### Fixed issues * Disabling a Linux resilience probe removed all chaos faults associated with the chaos experiment. It has been fixed. Now, you can bulk enable and disable a Kubernetes and a Linux infrastructure's resilience probe. (CHAOS-3849) -## January 2024 +#### January 2024, Version 1.30.0 -### Version 1.30.0 - -#### New features and enhancements +##### New features and enhancements * Appropriate environment variables are added at relevant places to ensure that the self-managed platform (SMP) can be used with feature flags (FF). (CHAOS-3865) @@ -373,17 +370,17 @@ The release notes describe recent changes to Harness Chaos Engineering. * This release adds wildcard support for all entities in the [chaosguard conditons](/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/#condition). (CHAOS-3254) -#### Fixed issues +##### Fixed issues * Chaos hub icons were not visible when the hub name consisted of the '/' character. This is fixed so that a user can't create a hub with the '/' character. (CHAOS-3753) -### Version 1.29.0 +#### January 2024, Version 1.29.0 -#### New features and enhancements +##### New features and enhancements * Improves the error messages and logs returned to the client in the API to save chaos experiments. (CHAOS-3607) -#### Fixed issues +##### Fixed issues * Linux chaos infrastructure (LCI) installer wasn't executing the script with sudo privileges, which resulted in *Failed to install linux-chaos-infrastructure* error. This issue is now resolved. (CHAOS-3724) @@ -391,9 +388,9 @@ The release notes describe recent changes to Harness Chaos Engineering. * LCI process would get killed due to a lack of memory (OOM) when a high amount of memory was specified during a memory stress fault. This issue is now resolved so that the likeliness of OOM kills during limited memory availability is reduced. (CHAOS-3469) -### Version 1.28.1 +#### January 2024, Version 1.28.1 -#### New features and enhancements +##### New features and enhancements * Adds optimisation to utilise memory efficiently, reduce latency, and enhance server performance. 
(CHAOS-3581) @@ -409,7 +406,7 @@ The release notes describe recent changes to Harness Chaos Engineering. * Adds support for bulk-disable (disable enabled CRON schedules selected by user) and bulk-enable (enable disabled CRON schedules selected by user) CRON-scheduled experiments, with a limit of 20 experiments for every operation. (CHAOS-3174) -#### Fixed issues +##### Fixed issues * After selecting an experiment, when a user tried to select an active infrastructure for the experiment, the page would throw an error. This is fixed. (CHAOS-3585) @@ -417,8 +414,8 @@ The release notes describe recent changes to Harness Chaos Engineering. * When multiple faults are executed in parallel, faults that transitioned into an "errored" state would not reflect in the logs, whereas faults in **success** state reflected in the logs with an "errored" status. This is fixed. (CHAOS-3363) +
-## Previous releases
2023 releases diff --git a/release-notes/cloud-cost-management.md b/release-notes/cloud-cost-management.md index d647658e940..4aaf8737135 100644 --- a/release-notes/cloud-cost-management.md +++ b/release-notes/cloud-cost-management.md @@ -17,6 +17,22 @@ Review the notes below for details about recent changes to Harness Cloud Cost Ma Harness deploys changes to Harness SaaS clusters on a progressive basis. This means that the features and fixes that these release notes describe may not be immediately available in your cluster. To identify the cluster that hosts your account, go to the **Account Overview** page. ::: +## January 2025 + +### Version 1.38.3 + +#### New features and enhancements + +- Granular Governance Recommendations: We have introduced granular governance recommendations, providing greater detail and actionable insights at the individual resource level. This enhancement improves operational scalability by offering more targeted recommendations for both custom and out-of-the-box (OOTB) rules. Users can now specify the scope when adding recommendations to the Ignore List, with options for Rule-level, Subscription-level, or Resource-level exclusions. [CCM-20482] + +- New Table View in Output Terminal of Evaluations: The output terminal for evaluations in Asset Governance now includes a new table view, with an option to flatten the table for easier analysis. This update enhances the visibility of governance evaluations and allows users to better manage large sets of data. [CCM-20448] + +- Recommendations Insights Tab: Harness CCM now offers a new Recommendations Insights tab in the Asset Governance module, providing users with clear visibility into the evaluation status of each rule. This feature includes detailed insights into the cloud account and region associated with the recommendations. The tab also supports troubleshooting by displaying information on why recommendations may fail, be ignored, or succeed, enabling users to take immediate corrective actions when necessary. + +- Extended Idle Time for AutoStopping Rules: The AutoStopping idle time limit in the UI has been extended to allow users to set up to 24 hours (1440 minutes). [CCM-20484] + +- Overlapping Schedules for AutoStopping Rules: Users can now define multiple fixed schedules within an AutoStopping rule, with support for overlapping schedules. To determine the final resulting schedule at any given time, users can set the priority order of the schedules using drag-and-drop functionality. [CCM-19196] + ## December 2024 diff --git a/release-notes/continuous-delivery.md b/release-notes/continuous-delivery.md index e5c551741f5..e235642a055 100644 --- a/release-notes/continuous-delivery.md +++ b/release-notes/continuous-delivery.md @@ -45,12 +45,89 @@ import Kustomizedep from '/release-notes/shared/kustomize-3-4-5-deprecation-noti
-## December
+## January 2025

-### Gitops Version 1.22.0, Gitops Agent Version 0.83.0
+### GitOps Version 1.23.0, GitOps Agent Version 0.84.0
+
+#### New features and enhancements
+
+- The GitOps Agent Argo version has been upgraded from 2.10 to 2.13. (CDS-104976)
+
+- With GitOps agent version 0.83.x, if the agent is at project scope in Harness, it will also reconcile Argo clusters and repos that don't have the "project" field set ("project" refers to the Argo project). (CDS-105211)
+
+- The version of SOPS used by Argo has been upgraded from 3.9.0 to 3.9.2. (CDS-105323)
+
+- Filtering has been greatly improved for GitOps Applications. Here is what you can expect:
+  - You can now create and save your own custom filters for repeated use.
+  - Navigate a new filters UI/UX to create and manage your filters easily.
+  - Added a search box to filter parameter dropdowns. (CDS-101484)
+
+#### Fixed issues
+
+- Previously, a `duplicate argo project mapping found` error was being thrown incorrectly when using the API endpoint `/api/v1/appprojectsmapping`. This is now fixed. (CDS-105291)
+- Improved logging for the service and agent to include versions. (CDS-97354)
+- Improved agent deployment via Terraform. This includes fixing a health check, fetching an agent token, and adding a hash value in manifests to enforce reloading. (CDS-102304)
+
+### Version 1.71.2
+
+#### New features and enhancements
+
+- You can bypass artifact consumption checks for a service in a Deploy stage by checking the **Disable artifact in this stage** checkbox. This feature applies to primary and sidecar artifacts. For more information, go to Harness [Skip Artifact Consumption for the Stage](/docs/continuous-delivery/x-platform-cd-features/services/artifact-sources/#skip-artifact-consumption-for-the-stage). This feature is behind the feature flag `CDS_ARTIFACT_DISABLE_VALIDATION`. Contact [Harness support](mailto:support@harness.io) to enable it. (CDS-96644, ZD-68382)
+
+- You can now add environments created at the Project and Organization levels to the environment groups. For more information, go to Harness [Cross Scope Environment Groups](https://developer.harness.io/docs/continuous-delivery/x-platform-cd-features/environments/create-environment-groups#cross-scope-environment-groups). Currently, the Cross Scope Environment Groups feature is behind the feature flag `CDS_CROSS_SCOPED_ENV_GROUPS`. Contact [Harness support](mailto:support@harness.io) to enable it. (CDS-93146)
+
+- You can now reference secrets in expressions for input variables in the TAS Command step. For more information, go to Harness [Tanzu Command step](https://developer.harness.io/docs/continuous-delivery/deploy-srv-diff-platforms/tanzu/tanzu-command-step#output-variables). (CDS-100322, ZD-69107, ZD-69226)
+
+- The Post-Prod Rollback API has been enhanced to simplify the rollback process by reducing the number of required parameters. The new version of the API allows you to trigger a rollback using just Environment Id, Infrastructure Id, Service Id, and Artifact, making it easier for users to invoke the rollback without dealing with complex identifiers like `instanceKey` and `infraMappingId`. (CDS-97775)
+
+- Harness now provides detailed log information for pods and containers during the **Wait For Steady State** step in Kubernetes deployments, helping you troubleshoot deployment issues by providing real-time insights into pod statuses.
For more information, go to Harness [Detailed diagnostics for K8s Deployment](https://developer.harness.io/docs/continuous-delivery/deploy-srv-diff-platforms/kubernetes/kubernetes-cd-quickstart#detailed-diagnostics-for-k8s-deployment). Currently, this feature is behind the feature flag `CDS_K8S_DETAILED_POD_LOGS`. Contact [Harness Support](mailto:support@harness.io) to enable the feature. (CDS-99009) + +- `shouldSendTriggerPayload` flag is added in the `eventHistory` API to allow users to exclude the payload from the response, improving performance and preventing issues with large payloads. (PIPE-24223, ZD-75049) #### Fixed Issues +- **Previously**, pipelines could start execution without valid stages, resulting in unexpected behavior and execution graph failures. This issue is **fixed**, and execution is now blocked if no valid stage is present in the pipeline. (**PIPE-24086, ZD-74528**) +- **Previously**, the console log search would not function correctly after switching steps in a pipeline execution. The issue is **fixed**, and the search functionality now continues seamlessly across steps, ensuring a smooth user experience. (**PIPE-23990, ZD-74681**) +- **Previously**, changes in GitHub, such as subsequent commits to newly created branches, were not propagating into Harness. This issue is **fixed**, and changes now propagate correctly. (**PIPE-23576, ZD-73720, ZD-74770**) +- **Previously**, user pipeline execution would stall. This issue is **fixed**. (**PIPE-22375, ZD-71138**) +- **Previously**, the tooltip in the UI did not display that CD consumes 1 Service License for every 2000 pipeline executions of custom stages. The issue is **fixed**, and the tooltip now displays the correct information. (**CDS-105411, ZD-75543**) +- **Previously**, Nexus3 triggers only fetched 50 tags. This issue is **fixed**, and now all tags will be fetched. (**CDS-105392, ZD-75467**) +- **Previously**, fetching clusters on a change of GCP Connector in infrastructure failed. This issue is **fixed**, and clusters now fetch correctly. (**CDS-105365, ZD-74674**) +- **Previously**, the pipeline stage name resolution issue caused artifact identifier expressions to fail, resulting in intermittent pipeline failures with the error: "Invalid request: No artifact source exists with the identifier null inside service." This issue is **fixed**. (**CDS-105306, ZD-75216**) +- **Previously**, secrets were exposed in logs during the execution of TAS Rolling Deploy Step in case of step failure. This issue is **fixed**, and secrets are no longer exposed. (**CDS-105184, ZD-75003**) +- **Previously**, the AWS load balancer dropdown was not populated when using a blue-green deployment step in a template. This issue is **fixed**, and the dropdown now populates correctly. (**CDS-105168, ZD-73560**) +- **Previously**, the Fetch Helm Chart Metadata process for Helm charts failed during parallel stage deployments with the error: **Unable to fetch files for filePath `[charts/spark-support/Chart.yaml]` for Branch: stable.** This issue is **fixed**, and proper delegate selection during the manifest step is ensured. (**CDS-105137, ZD-74836**) +- **Previously**, users were unable to perform rollbacks in certain cases due to the swapping of OrgID and ProjectID in the API call. This issue is **fixed**, and rollbacks now work as expected. 
(**CDS-105087, ZD-74833**) +- **Previously**, users encountered a "signal is aborted without a reason" notification while selecting a service in the deployment pipeline due to overlapping API calls made in quick succession. This issue is **fixed**, and the error no longer occurs. (**CDS-105011, ZD-74651**) +- **Previously**, additional strings were incorrectly appended to the values file path during runtime when entered as allowed values, causing parsing errors and deployment failures. This issue is **fixed**, and values are now parsed correctly. (**CDS-104787, ZD-74331**) +- **Previously**, connector references for Bitbucket were still appearing in the "referenced by" list after updating pipelines or resources to GitHub. This issue is **fixed**, and references are now removed correctly. (**CDS-104715, ZD-74243**) +- **Previously**, GCP OIDC connections failed when isolated to a specific project/org in Harness. This issue is **fixed**, and the connection now establishes successfully with the correct configuration during the test connection flow. Additionally, the connection test is supported for Platform as well (**CDS-104975, ZD-74230**) +- **Previously**, pipelines using triggers with input sets did not correctly apply the values provided. This issue is **fixed**, and the pipeline execution now uses the correct value from the specified input set. (**PIPE-24088, ZD-74889**) +- **Previously**, GitX auto-creation did not function as expected when YAML files were added to GitHub repositories. This issue is **fixed**, and entities are now created as expected. (**PIPE-22712, ZD-71904**) +- **Previously**, trigger event history was not being saved for scheduled triggers. This issue is **fixed**, and event history is now saved correctly. (**PIPE-24190, ZD-75384**) +- **Previously**, the "References/Referenced By" list did not show the complete list of connectors and secrets being used. This issue is **fixed**, and the list now shows the full set of references. (**PIPE-23709, ZD-74015**) +- **Previously**, when creating a new ASG from the base ASG, the Dimensions were not updated while inheriting the Scaling Policy properties. This issue is **fixed**, and the new ASG name is properly passed and scaling policies are updated. (**CDS-105225, ZD-74995**) +- **Previously**, GitOps steps ignored step, stage, and pipeline delegate selectors, failing to follow the delegate precedence outlined in the documentation. This issue is **fixed**, and GitOps steps now respect the correct delegate precedence. (See more on delegate precedence [here](https://developer.harness.io/docs/platform/delegates/manage-delegates/select-delegates-with-selectors/#delegate-selector-priority)). (**CDS-105143, ZD-74401**) +- **Previously**, editing a connector with the `CDS_DISABLE_CONNECTOR_PT_CREATION` feature flag enabled would reset the Perpetual Task unexpectedly. This issue is **fixed**, and the Heartbeat Perpetual Task is properly deleted during connector edits. (**CDS-104552, ZD-73947**) +- **Previously**, the default pull policy for add-on images under Account > Pipeline Settings was incorrectly set to Always. This issue is **fixed**, and the default is now null, allowing Kubernetes to determine the pull mechanism. (**CDS-101797, ZD-70029**) +- **Previously**, a race condition during pipeline rollback with parallel steps could cause a duplicate key exception. This issue is **fixed**, and race conditions are now properly handled, resolving class cast exceptions. 
(**PIPE-23275, ZD-73233**)
+- **Previously**, execution inputs in chained pipelines would intermittently fail to display, leaving the pipeline in a "Waiting State." This issue is **fixed**, and inputs now display correctly. (**PIPE-22998, ZD-72739**)
+- **Previously**, users were unable to extract the logs.zip file after downloading from pipeline execution on Windows machines. This issue is **fixed**, and users can now extract the logs.zip file. (**PIPE-20446, ZD-66561**)
+- **Previously**, Azure slot deployments failed with a conflict error: "The 'Preparing zip package' operation conflicts with the pending 'Performing continuous deployment' operation." This issue is **fixed**, and deployments now proceed without conflict. (**CDS-104813, ZD-74163**)
+- **Previously**, connectivity errors (e.g., delegate failing to connect due to invalid host configuration or firewall issues) were misclassified as "Unknown Errors." This issue is **fixed**, and connectivity errors are now properly categorized, ensuring that failure strategies execute as intended. (**CDS-104747, ZD-74235**)
+- **Previously**, pipelines were intermittently failing with the error message in the helm deployment step: "Error occurred while performing this operation." This issue is **fixed**, and deployments now proceed without errors. (**CDS-104710, ZD-74154**)
+- **Previously**, pipelines were failing during deployment to multiple environments due to missing infrastructure definitions, with no proper validation or error message. This issue is **fixed**, and missing infrastructure definitions are properly validated. (**CDS-104586, ZD-73964**)
+- **Previously**, Helm deployments failed during rollback due to missing or invalid revision numbers in Helm history, resulting in errors like `NumberFormatException: For input string: ""`. This issue is **fixed**, and rollbacks now proceed without errors. (**CDS-103746, ZD-72898**)
+- **Previously**, original Auto Scaling Group (ASG) tags were removed when selecting the "Base ASG" option in Infrastructure setup, causing permission errors during the creation of new launch templates. This issue is **fixed**, and ASG tags are preserved correctly. (**CDS-103081**)
+- **Previously**, using a containerized step group nested within a normal step group caused pipeline execution to fail with a `NullPointerException: Cannot invoke "java.util.List.size()" because "ports" is null.` This issue is fixed, and pipelines with nested containerized step groups now execute successfully without errors. (**CDS-105395, ZD-74949**)
+- **Previously**, during load testing, customers faced issues with the delegate thread pool size being too small, leading to failures when executing pipelines concurrently. Additionally, some delegates reported errors due to missing CF CLI versions, and 503 errors occurred due to proxy configuration issues. This issue is fixed by increasing the delegate thread pool size and improving the detection mechanism for CF CLI installations. Customers should now experience more stable pipeline executions, even with larger concurrent loads. (**CDS-103868**)
+- **Previously**, some pipelines faced issues with black-screening after approximately 6 hours, despite an increase in the log-service duration from 5 hours to 10 hours. This was due to large log files (over 20k lines) causing disruptions. The issue also involved a discrepancy between the log-service's stream duration and the expected limits, affecting log processing during longer executions.
This issue is now fixed by extending the log-service duration to 10 hours and improving the handling of log limits at the account level. (**PIPE-24058, ZD-73735**)
+
+
+## December
+
+### Gitops Version 1.22.0, Gitops Agent Version 0.83.0
+
 #### New Features and enhancements

 - Harness now supports the `prefixed_identifier` field in the GitOps Agent Terraform resource. This field provides a scope-prefixed agent identifier, where the scope (account, org, or project) is constant, and the agentId varies based on the specific agent.
@@ -78,7 +155,7 @@ For example, `account.agentId` for Account-level agents, `org.agentId` for Organ

 - A null pointer exception was thrown when a pipeline was provided a primary artifact, but no artifact was selected in the service. This issue is fixed now. (CDS-104756)
 - During autocreation of entities, the name and identifier were incorrectly retrieved from the Infrastructure Definition YAML file pulled from Git. The logic was using the wrong key in the YAML, causing it to always fall back on the file name instead. This issue is fixed now. (CDS-104751)
-- Previously, when Override V2 was enabled and no `ENV_GLOBAL_OVERRIDE`S or `ENV_SERVICE_OVERRIDES `were present, the system would fall back to reading overrides from the environment configuration YAML. However, the UI did not display information about these YAML-based overrides, causing confusion for users as they were unable to identify or control this behavior. This issue is fixed now and when Override V2 is enabled, overrides from the environment configuration YAML will no longer be considered. This change ensures clarity and aligns the system behavior with customer expectations. This fix is behind the FF `CDS_SERVICE_OVERRIDES_2_0_YAML_V2_SUPPORT`. (CDS-104570, ZD-74034)
+- Previously, when Override V2 was enabled and no `ENV_GLOBAL_OVERRIDES` or `ENV_SERVICE_OVERRIDES` were present, the system would fall back to reading overrides from the environment configuration YAML. However, the UI did not display information about these YAML-based overrides, causing confusion for users as they were unable to identify or control this behavior. This issue is fixed now, and when Override V2 is enabled, overrides from the environment configuration YAML will no longer be considered. This change ensures clarity and aligns the system behavior with customer expectations. This fix is behind the FF `CDS_SERVICE_OVERRIDES_2_0_YAML_V2_SUPPORT`. (CDS-104570, ZD-74034)
 - Previously, when a user selected "Deploy to Multiple Environments" inside a stage template and set it as a runtime input, the value was incorrectly treated as a fixed value during pipeline execution. This issue is fixed now. (CDS-104471, ZD-73843)
 - Previously, GitOps entries did not appear in custom dashboards when the `cd_stage_execution` view was used in Looker. This issue is fixed now. (CDS-103135)
@@ -90,6 +167,7 @@ For example, `account.agentId` for Account-level agents, `org.agentId` for Organ
 - User was not able to provide timeout for Service Now approval step as Input. This issue is fixed now. (PIPE-23742, ZD-73247)
 - When using blue green deployment step in a template, AWS load balancer dropdown was not getting populated. This issue is fixed now. (CDS-104478, ZD-73560)
 - Previously, the Shell Script Provisioner step for PDC infrastructure failed when using runtime inputs for `hostAttributes`. This issue is fixed now. (CDS-104659)
+- Previously, users faced an issue with ASG deployment when using dynamic target infrastructure provisioning.
This issue is fixed now. (CDS-103872) ## November diff --git a/release-notes/continuous-integration.md b/release-notes/continuous-integration.md index 2f2d042777d..0105e733695 100644 --- a/release-notes/continuous-integration.md +++ b/release-notes/continuous-integration.md @@ -1,7 +1,7 @@ --- title: Continuous Integration release notes sidebar_label: Continuous Integration -date: 2024-10-08T10:00 +date: 2025-01-10T10:00 sidebar_position: 10 --- @@ -20,6 +20,25 @@ These release notes describe recent changes to Harness Continuous Integration. ::: +## January 2025 + +### Version 1.60 + + + +#### Fixed issues + +- Resolved an issue where CI build steps could execute out of order or be duplicated in rare cases due to preexisting files in the workspace. Introduced a marker file mechanism to ensure sequential execution. This change is behind the feature flag `CI_ADDON_RETRY_MARKER_FILE`. (CI-14705, ZD-71193, ZD-71443, ZD-74544) +- Addressed an issue where the "Allow Privilege Escalation" flag in the Infrastructure configuration was incorrectly included in the YAML, even when disabled. Enhanced error messaging now notifies users of incorrect Kubernetes flag settings during pipeline execution. (CI-14740, ZD-71175) +- Resolved an issue in Kubernetes pipelines where large commit messages caused the pipeline to fail with the error: "Request entity too large: limit is 3145728." Commit message length is now properly limited to prevent this error. (CI-15276, ZD-73618) +- Resolved an issue where blank optional fields in templates for ACR steps defaulted to a "null" string, causing failures. This issue was specific to accounts using the Kaniko plugin. The fields now correctly default to being skipped if left blank. (CI-15431, ZD-71473) +- Resolved an issue where Bitbucket connectivity in Harness Cloud failed when using Secure Connect on macOS. (CI-15432, ZD-74614) + +#### Harness images updates + +| **Image** | **Change** | **Previous version** | **New Version** | +| ---------------- | ---------------------------------- | -------------------- | --------------- | +| `plugins/docker` | Addressed security vulnerabilities | 20.18.5 | 20.18.6 | ## December 2024 @@ -40,47 +59,47 @@ If you have any questions or need assistance with the whitelisting process, plea ::: ### Version 1.58 - + #### New features and enhancements + - Expanded Cache Intelligence functionality to include support for VB and F# with .NET (CI-14396). -- Added ability to set `topologySpreadConstraint` in 'Pod Spec Overlay' field for k8s build pods from the UI editor. This was previously available in YAML editor only (CI-14169). +- Added ability to set `topologySpreadConstraint` in 'Pod Spec Overlay' field for k8s build pods from the UI editor. This was previously available in YAML editor only (CI-14169). - Tests related output variables are now available when publishing test reports through 'Run' and 'Test' steps, capturing the number of total tests, skipped tests, failed tests, etc. This is currently available when enabling the `CI_ENABLE_TEST_SUMMARY_AS_OUTPUTS` feature flag. Supported with Kubernetes infrastructure. (CI-15281) - -To enable feature flags, please contact [Harness Support](mailto:support@harness.io). +To enable feature flags, please contact [Harness Support](mailto:support@harness.io). #### Fixed issues + - Resolved an issue where Python script execution in Harness pipelines failed with vague error messages when an output variable was configured but not assigned a value. 
Updated error messages to explicitly identify the unset variable, improving clarity and user experience. (CI-14053, ZD-69167) - Fixed an issue where Docker build arguments defined as comma-separated strings (e.g., a1,a2,a3) were incorrectly parsed (CI-14845). - Addressed an issue where Docker Layer Caching in 'Build and Push' steps failed with the error: "error while bootstrapping buildx builder" when running on Kubernetes. (CI-15228, ZD-73397) #### Harness images updates -| **Image** | **Change** | **Previous version** | **New Version** -|-------------------------------|-----------------|-------------|------------------| -| `harness/ci-addon` | Add support for exposing test report summary with output variables | 1.16.61 | 1.16.66 -| `harness/ci-lite-engine` | Add support for exposing test report summary with output variables | 1.16.61 | 1.16.66 -| `plugins/buildx` | Update buildx version to include buildkit in image | 1.1.20 | 1.1.24 -| `plugins/cache` | update cache intelligence plugin tags. updated tag fixes windows restore bug and adds support for vb .net and f# .net | 1.8.0 | 1.9.0 - +| **Image** | **Change** | **Previous version** | **New Version** | +| ------------------------ | --------------------------------------------------------------------------------------------------------------------- | -------------------- | --------------- | +| `harness/ci-addon` | Add support for exposing test report summary with output variables | 1.16.61 | 1.16.66 | +| `harness/ci-lite-engine` | Add support for exposing test report summary with output variables | 1.16.61 | 1.16.66 | +| `plugins/buildx` | Update buildx version to include buildkit in image | 1.1.20 | 1.1.24 | +| `plugins/cache` | update cache intelligence plugin tags. updated tag fixes windows restore bug and adds support for vb .net and f# .net | 1.8.0 | 1.9.0 | ### Version 1.57 - -- Resolved an issue with ECR image links in the artifacts tab for the "Build and Push to ECR" step by adding the missing "/_" separator, ensuring correct functionality. (CI-15089, ZD-72329). -- Resolved an issue where OPA enforcement in CI stages didn't work properly in the Self-Managed Enterprise Edition (CI-14840, ZD-70943). + +- Resolved an issue with ECR image links in the artifacts tab for the "Build and Push to ECR" step by adding the missing "/\_" separator, ensuring correct functionality. (CI-15089, ZD-72329). +- Resolved an issue where OPA enforcement in CI stages didn't work properly in the Self-Managed Enterprise Edition (CI-14840, ZD-70943). ## November 2024 - - ### Version 1.56 + #### Fixed issues + - Resolved an issue to ensure proper functionality for "Upload Artifact to S3" and "Save/Restore Cache to S3" steps when used with AWS connector configured with an External ID (CI-14214, ZD-69360). - Fixed an issue where the DRONE_REPO_OWNER built-in environment variable pointed to the wrong owner when the CI pipeline was triggered by a tag event. A new feature flag (CI_DRONE_REPO_OWNER) has been introduced to ensure DRONE_REPO_OWNER is correctly extracted from the repository URL (CI-14468). - The volume size field, in CI stage Kubernetes infrastructure settings, has been updated to support expressions, improving user experience and functionality (CI-14043, ZD-69169). 
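For the `topologySpreadConstraint` support called out under Version 1.58 above, a minimal sketch of the kind of snippet that field accepts may be useful. The structure below is the standard Kubernetes `topologySpreadConstraints` schema; the label selector value is an illustrative assumption, not a documented Harness default:

```yaml
# Standard Kubernetes topologySpreadConstraints block, as it might be supplied
# through the CI stage's Pod Spec Overlay. Only the schema is standard
# Kubernetes; the "app" label value is hypothetical.
topologySpreadConstraints:
  - maxSkew: 1                          # max allowed imbalance of matching pods per zone
    topologyKey: topology.kubernetes.io/zone
    whenUnsatisfiable: ScheduleAnyway   # soft constraint; DoNotSchedule makes it binding
    labelSelector:
      matchLabels:
        app: harness-build-pod          # hypothetical label applied to build pods
```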
@@ -92,47 +111,51 @@ To enable feature flags, please contact [Harness Support](mailto:support@harness

#### Harness images updates

-| **Image** | **Change** | **Previous version** | **New Version**
-|-------------------------------|-----------------|-------------|------------------|
-| `plugins/s3` | Added PLUGIN_USER_ROLE_EXTERNAL_ID to pass external ID for the secondary role when required | 1.4.3 | 1.5.0
-| `plugins/gcs ` | Go and alpine version upgrade to 1.22.7 and 3.20 respectively | 1.6.0 | 1.6.1
-| `drone-buildx` | Go and docker version upgrade to 1.22.7 and 27.3.1-dind respectively| 1.1.19 | 1.1.20
-
+| **Image** | **Change** | **Previous version** | **New Version** |
+| -------------- | -------------------------------------------------------------------------------------------- | -------------------- | --------------- |
+| `plugins/s3` | Added PLUGIN_USER_ROLE_EXTERNAL_ID to pass external ID for the secondary role when required | 1.4.3 | 1.5.0 |
+| `plugins/gcs ` | Go and alpine version upgrade to 1.22.7 and 3.20 respectively | 1.6.0 | 1.6.1 |
+| `drone-buildx` | Go and docker version upgrade to 1.22.7 and 27.3.1-dind respectively | 1.1.19 | 1.1.20 |

### Version 1.54
+
#### New features and enhancements

-- [Build Intelligence](https://developer.harness.io/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-intelligence) now supports self-hosted builds in Kubernetes. Customer that run builds on Kubernetes can now configure S3-compatible bucket for Build Intelligence caching. Authentication through AWS/GCP connector is currently supported with OIDC or Access Key/Secret Key.only. *Note*: This feature requires the following feature flags to be enabled: `CI_CACHE_ENABLED`, `CI_ENABLE_BUILD_CACHE_K8` and `PL_GCP_OIDC_AUTHENTICATION` for GCP or `CDS_AWS_OIDC_AUTHENTICATION` for AWS.
-- Secure Connect is now supported with JFrog Artifactory connector (CI-15004).
+
+- [Build Intelligence](https://developer.harness.io/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-intelligence) now supports self-hosted builds in Kubernetes. Customers that run builds on Kubernetes can now configure an S3-compatible bucket for Build Intelligence caching. Authentication through an AWS/GCP connector is currently supported with OIDC or Access Key/Secret Key only. _Note_: This feature requires the following feature flags to be enabled: `CI_CACHE_ENABLED`, `CI_ENABLE_BUILD_CACHE_K8` and `PL_GCP_OIDC_AUTHENTICATION` for GCP or `CDS_AWS_OIDC_AUTHENTICATION` for AWS.
+- Secure Connect is now supported with the JFrog Artifactory connector (CI-15004).
- Support for Docker Build Secrets in "Build and Push" Steps - You can now configure Docker build secrets in the Build and Push step using YAML. This feature allows specifying secrets via `envDockerSecrets` and/or `fileDockerSecrets` fields, applicable when running build-and-push steps using Buildx (not Kaniko). Note that using Buildx in Kubernetes build infrastructure requires privileged access.
-*Note*: This feature requires the feature flag `CI_USE_BUILDX_ON_K8` to be enabled when running builds in Kubernetes.
+ _Note_: This feature requires the feature flag `CI_USE_BUILDX_ON_K8` to be enabled when running builds in Kubernetes.
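As a companion to the Docker build secrets bullet above, here is a minimal, hypothetical sketch of a Build and Push step using the `envDockerSecrets` and `fileDockerSecrets` fields named in the note. The step type, connector, repository, secret names, and the map shape of the two fields are assumptions, and the fields apply only when building with Buildx (with `CI_USE_BUILDX_ON_K8` enabled on Kubernetes):

```yaml
# Sketch only: Docker build secrets on a Build and Push step. All
# identifiers are placeholders, and the field shapes are assumed.
# Buildx (not Kaniko) is required; on Kubernetes it needs privileged access.
- step:
    type: BuildAndPushDockerRegistry    # assumed step type
    name: Build and Push
    identifier: build_and_push
    spec:
      connectorRef: my_docker_connector # assumed connector ID
      repo: myorg/myapp
      tags:
        - <+pipeline.sequenceId>
      envDockerSecrets:
        NPM_TOKEN: <+secrets.getValue("npm_token")>  # env-based build secret
      fileDockerSecrets:
        netrc: /harness/.netrc                       # file-based build secret
```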
+- Added support for increasing the execution log size limit from 5 MB to 25 MB when running builds in Kubernetes. This feature requires the feature flag `CI_INCREASE_LOG_LIMIT` to be enabled, and is supported on Kubernetes build infrastructure only (PIPE-22885).

-To enable feature flags, please contact [Harness Support](mailto:support@harness.io).
+To enable feature flags, please contact [Harness Support](mailto:support@harness.io).

#### Fixed issues
+
- Improved secret error debugging for pipeline variables - when referencing a non-existent secret in a pipeline variable, the error message now provides actionable details to help debug, rather than a generic exception (CI-15013)

#### Harness images updates

-| **Image** | **Change** | **Previous version** | **New Version**
-|-------------------------------|-----------------|-------------|------------------|
-| `drone/buildx` | Revert base64 support added to handle secrets with special characters | 1.1.16 | 1.1.19
-| `harness/ci-addon` | Log Service - ability to increase limit for logs in K8S | 1.59 | 1.61
-| `harness/ci-lite-engine` | Log Service - ability to increase limit for logs in K8S | 1.591.59 | 1.611.61
-
+| **Image** | **Change** | **Previous version** | **New Version** |
+| ------------------------ | ----------------------------------------------------------------------- | -------------------- | --------------- |
+| `drone/buildx` | Revert base64 support added to handle secrets with special characters | 1.1.16 | 1.1.19 |
+| `harness/ci-addon` | Log Service - ability to increase limit for logs in K8S | 1.59 | 1.61 |
+| `harness/ci-lite-engine` | Log Service - ability to increase limit for logs in K8S | 1.59 | 1.61 |

### Version 1.53
+
#### New features and enhancements

-- Self-Hosted Cache Intelligence and Docker Layer Caching Enhancements - This release introduces enhancements for self-hosted builds, allowing seamless configuration of S3-compatible caching with AWS or GCP connectors using OIDC for authentication These options are behind the feature flags `CI_ENABLE_DLC_SELF_HOSTED` (for Docker layer caching) and `CI_ENABLE_CACHE_INTEL_SELF_HOSTED` (for Cache Intelligence).
+
+- Self-Hosted Cache Intelligence and Docker Layer Caching Enhancements - This release introduces enhancements for self-hosted builds, allowing seamless configuration of S3-compatible caching with AWS or GCP connectors using OIDC for authentication. These options are behind the feature flags `CI_ENABLE_DLC_SELF_HOSTED` (for Docker layer caching) and `CI_ENABLE_CACHE_INTEL_SELF_HOSTED` (for Cache Intelligence).
- OIDC, previously available only for Harness Cloud, is now supported for self-hosted builds running on Kubernetes, enhancing security and simplifying authentication. OIDC is currently behind feature flags `PL_GCP_OIDC_AUTHENTICATION` for GCP and `CDS_AWS_OIDC_AUTHENTICATION` for AWS

-To enable feature flags, please contact [Harness Support](mailto:support@harness.io).
+To enable feature flags, please contact [Harness Support](mailto:support@harness.io).

#### Fixed issues
+
- Storing secrets in custom secret managers is now supported for cache intelligence in the self-hosted flow. (CI-14719, ZD-71881)
- Fixed an issue where bitbucket tag builds with tags containing slashes were causing errors in execution due to `<+codebase.commitSha>` returning null. Harness now correctly supports tags with slashes for bitbucket and git builds, ensuring SHA values are properly referenced.
(CI-14706, ZD-70972) - Addressed an issue where pipelines failed at the clone codebase step on Windows infrastructure when using the GitHub SSH connector and cloning using LFS. (CI-14592, ZD-70570, ZD-71715) @@ -140,12 +163,11 @@ To enable feature flags, please contact [Harness Support](mailto:support@harness #### Harness images updates -| **Image** | **Change** | **Previous version** | **New Version** -|-------------------------------|-----------------|-------------|------------------| -| `harness/drone-git` | Fixed an issue when using the GitHub SSH connector on Windows and cloning while fetching LFS files (CI-14592) | 1.61 | 1.62 -| `harness/ci-addon` | Improved "Copy to Clipboard" functionality for pipeline output logs (CI-14200)| 1.16.58 | 1.16.59 -| `plugin/artifactory` | Added support for Secure Connect (CI-14921)| 1.7.0 | 1.7.1 - +| **Image** | **Change** | **Previous version** | **New Version** | +| -------------------- | ------------------------------------------------------------------------------------------------------------- | -------------------- | --------------- | +| `harness/drone-git` | Fixed an issue when using the GitHub SSH connector on Windows and cloning while fetching LFS files (CI-14592) | 1.61 | 1.62 | +| `harness/ci-addon` | Improved "Copy to Clipboard" functionality for pipeline output logs (CI-14200) | 1.16.58 | 1.16.59 | +| `plugin/artifactory` | Added support for Secure Connect (CI-14921) | 1.7.0 | 1.7.1 | ## October 2024 @@ -155,29 +177,27 @@ To enable feature flags, please contact [Harness Support](mailto:support@harness #### New features and enhancements -- Harness CI now supports **Hardware Acceleration** with nested virtualization on Linux runners with AMD architecture. This enhancement accelerates Android emulation, enabling faster and more efficient Android test execution within virtualized CI environments. See [documentation](https://developer.harness.io/docs/continuous-integration/use-ci/set-up-build-infrastructure/use-harness-cloud-build-infrastructure#hardware-acceleration) for more information. -- Introduced a new plugin, `plugins/test-analysis` to support managing flaky tests by introducing a quarantine mechanism. This helps teams to reduce false positives in CI by isolating non-critical, known flaky test failures. By using a quarantine list, the plugin prevents disruptions caused by unreliable tests, allowing teams to focus on true failures and improve test suite reliability without unnecessary pipeline failures. See [plugin README](https://github.com/harness-community/parse-test-reports/blob/main/README.md) for more information (CI-13605). -- Added support for setting display name, which will appear for URLs published in the Artifacts tab, when using the plugin `plugins/artifact-metadata-publisher`. See [documentation](https://developer.harness.io/docs/continuous-integration/use-ci/build-and-upload-artifacts/artifacts-tab/#file_urls) for more information (CI-12176). +- Harness CI now supports **Hardware Acceleration** with nested virtualization on Linux runners with AMD architecture. This enhancement accelerates Android emulation, enabling faster and more efficient Android test execution within virtualized CI environments. See [documentation](https://developer.harness.io/docs/continuous-integration/use-ci/set-up-build-infrastructure/use-harness-cloud-build-infrastructure#hardware-acceleration) for more information. +- Introduced a new plugin, `plugins/test-analysis` to support managing flaky tests by introducing a quarantine mechanism. 
This helps teams to reduce false positives in CI by isolating non-critical, known flaky test failures. By using a quarantine list, the plugin prevents disruptions caused by unreliable tests, allowing teams to focus on true failures and improve test suite reliability without unnecessary pipeline failures. See [plugin README](https://github.com/harness-community/parse-test-reports/blob/main/README.md) for more information (CI-13605). +- Added support for setting display name, which will appear for URLs published in the Artifacts tab, when using the plugin `plugins/artifact-metadata-publisher`. See [documentation](https://developer.harness.io/docs/continuous-integration/use-ci/build-and-upload-artifacts/artifacts-tab/#file_urls) for more information (CI-12176). #### Fixed issues -- Resolved an issue where excessive logging of the "sanitizeStreamLogs: sanitizing lines" message was flooding the engine and add-on logs. Additionally, a monitoring log line that was previously removed, impacting customer monitoring, has been restored. (CI-14640, ZD-71067) +- Resolved an issue where excessive logging of the "sanitizeStreamLogs: sanitizing lines" message was flooding the engine and add-on logs. Additionally, a monitoring log line that was previously removed, impacting customer monitoring, has been restored. (CI-14640, ZD-71067) - Resolved an issue where sessions were initiated without credential information. The update ensures sessions are now created with the correct credentials, enabling cross-account authentication (CI-14134, ZD-69447) #### Harness images updates -| **Image** | **Change** | **Previous version** | **New Version** -|-------------------------------|-----------------|-------------|------------------| -| `harness/ci-addon` | Updated base image and go version, addressing security vulnerabilities (CI-14173) | 1.16.57 | 1.16.58 -| `harness/ci-addon` | Removed a log line in lite-engine that was breaking a customer's monitoring process (CI-14735)| 1.16.58 | 1.16.58 -| `harness/ci-lite-engine` | Updated base image and go version, addressing security vulnerabilities (CI-14173) | 1.16.57 | 1.16.58 -| `harness/ci-lite-engine` | Removed a log line in lite-engine that was breaking a customer's monitoring process (CI-14735)| 1.16.56 | 1.16.57 -| `harness/drone-git` | Updated the base image, addressing security vulnerabilities | 1.6.0 | 1.6.1 -| `plugins/artifact-metadata-publisher` | Added support for setting display name, which will appear for URLs published in the Artifacts tab (CI-12176) | 1.0.0 | 2.0.0 -| `plugins/s3` | Updated session creation to include credential information, allowing for successful cross-account authentication. 
(CI-14134, ZD-69447) | 1.4.1 | 1.4.3 -| `drone-kaniko` | Added support for AWS OIDC in kaniko-ecr (CI-14242)| 1.10.1 | 1.10.2 - - +| **Image** | **Change** | **Previous version** | **New Version** | +| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | --------------- | +| `harness/ci-addon` | Updated base image and go version, addressing security vulnerabilities (CI-14173) | 1.16.57 | 1.16.58 | +| `harness/ci-addon` | Removed a log line in lite-engine that was breaking a customer's monitoring process (CI-14735) | 1.16.58 | 1.16.58 | +| `harness/ci-lite-engine` | Updated base image and go version, addressing security vulnerabilities (CI-14173) | 1.16.57 | 1.16.58 | +| `harness/ci-lite-engine` | Removed a log line in lite-engine that was breaking a customer's monitoring process (CI-14735) | 1.16.56 | 1.16.57 | +| `harness/drone-git` | Updated the base image, addressing security vulnerabilities | 1.6.0 | 1.6.1 | +| `plugins/artifact-metadata-publisher` | Added support for setting display name, which will appear for URLs published in the Artifacts tab (CI-12176) | 1.0.0 | 2.0.0 | +| `plugins/s3` | Updated session creation to include credential information, allowing for successful cross-account authentication. (CI-14134, ZD-69447) | 1.4.1 | 1.4.3 | +| `drone-kaniko` | Added support for AWS OIDC in kaniko-ecr (CI-14242) | 1.10.1 | 1.10.2 | ### Version 1.50 @@ -185,7 +205,7 @@ To enable feature flags, please contact [Harness Support](mailto:support@harness #### Fixed issues -- Resolved an issue in the build input settings that occurred during manual build runs. When selecting the Pull Request option under build type, the focus would occasionally shift to the Tags option instead. This issue has been fixed to ensure proper selection. (CI-14571, ZD-70841) +- Resolved an issue in the build input settings that occurred during manual build runs. When selecting the Pull Request option under build type, the focus would occasionally shift to the Tags option instead. This issue has been fixed to ensure proper selection. (CI-14571, ZD-70841) - Fixed an issue on the CI Execution Summary page where branch and tag names were being truncated in some cases. The layout has been updated to display the text correctly. (CI-14539) @@ -231,7 +251,7 @@ To enable feature flags, please contact [Harness Support](mailto:support@harness #### New features and enhancements -- Added support for setting Topology Spread Constraints to Kubernetes build pods. A new property, `podSpecOverlay`, has been introduced in the Kubernetes infrastructure properties within the CI stage, allowing users to apply additional settings to the build pod. Currently, this field supports specifying `topologySpreadConstraint`, with plans to extend support for additional configurations in the future. This feature requires using delegate version 24.09.83900 or higher (CI-14033) +- Added support for setting Topology Spread Constraints to Kubernetes build pods. A new property, `podSpecOverlay`, has been introduced in the Kubernetes infrastructure properties within the CI stage, allowing users to apply additional settings to the build pod. Currently, this field supports specifying `topologySpreadConstraint`, with plans to extend support for additional configurations in the future. 
This feature requires using delegate version 24.09.83900 or higher (CI-14033)
- Added the ability to exclude connectors from the preflight check. This can be configured in the connector YAML by setting the property `ignoreTestConnection` to `true`. If the user sets this flag as `true` along with the feature flag `CI_IGNORE_TEST_CONNECTION` enabled, no matter the configuration, the connection test will always be marked as **Successful**. The feature is gated behind the feature flag `CI_IGNORE_TEST_CONNECTION`. (CI-13806, ZD-65275,65643)

diff --git a/release-notes/delegate.md b/release-notes/delegate.md
index 527a3d9630c..620eabe9eca 100644
--- a/release-notes/delegate.md
+++ b/release-notes/delegate.md
@@ -87,6 +87,22 @@ For more information, go to [Delegate expiration support policy](/docs/platform/
:::

+## January 2025
+
+### Version 24.12.84708
+
+#### Hotfix
+
+- Added support for handling incomplete responses from New Relic during verify step execution. (CDS-105146)
+
+### Version 25.01.84800
+
+#### New features and improvements
+
+- Upgraded delegate base image from `redhat/ubi8-minimal:8.10` to `redhat/ubi9-minimal:9.4` (PL-58376)
+
+- Added a new metric on the delegate side to track the number of times the delegate WebSocket reconnects. This metric, `io_harness_custom_metric_delegate_reconnected_total`, can be used to set alerts for frequent reconnections, helping identify potential issues with the delegate and enabling you to seek further assistance from Harness if needed. (PL-48535)
+
## December 2024

### Version 24.11.84503

diff --git a/release-notes/feature-flags.md b/release-notes/feature-flags.md
index 6badda3df66..f5834164451 100644
--- a/release-notes/feature-flags.md
+++ b/release-notes/feature-flags.md
@@ -1,7 +1,7 @@
---
title: Feature Flags release notes
sidebar_label: Feature Flags
-date: 2024-11-13T08:09:25
+date: 2025-01-16T08:09:25
tags: [NextGen, "feature flags"]

sidebar_position: 11
@@ -11,16 +11,59 @@ sidebar_position: 11

Review the notes below for details about recent changes to Harness Feature Flags (FF). For release notes for Harness Self-Managed Enterprise Edition, go to [Self-Managed Enterprise Edition release notes](/release-notes/self-managed-enterprise-edition). Additionally, Harness publishes security advisories for every release. Go to the [Harness Trust Center](https://trust.harness.io/?itemUid=c41ff7d5-98e7-4d79-9594-fd8ef93a2838&source=documents_card) to request access to the security advisories.

+
+
:::info note
Harness deploys changes to Harness SaaS clusters on a progressive basis. This means that the features and fixes that these release notes describe may not be immediately available in your cluster. To identify the cluster that hosts your account, go to the **Account Overview** page.
:::

-#### Last updated: November 13, 2024
+#### Last updated: January 16, 2025
+
+## December 2024
+
+### Android Client SDK
+
+**New features and enhancements**:
+ - Removed the unused `org.threeten:threetenbp` dependency. (FFM-12237)

## November 2024

### Ruby Server SDK

+#### Version 1.4.5
+
+**Fixed Issues**:
+ Following from 1.4.3, we are still investigating an edge case in the SDK, where segmentation faults can occur when the SDK aggregates and sends metrics at the end of an interval:
+ - Evaluation metrics now use String keys, instead of class based keys.
(FFM-12192) + +#### Version 1.4.4 + +**Fixed Issues**: + Following from 1.4.3, we are still investigating an edge case in the SDK, where segmentation faults can occur when the SDK aggregates and sends metrics at the end of an interval: + - Replaces `concurrent-ruby/ConcurrentMap` with our own thread safe hash implementation. + - Fixed race condition if the optional `timeout_ms` argument is provided to `wait_for_initialization` and the SDK times out and initializes at the same time which could cause undefined behaviour for the lifetime of the SDK. (FFM-12192) + + We have fixed some behaviour around default variations being returned: + - Default variations are returned immediately: + - if the SDK is not initialized or still initializing. This avoids potentially incomplete evaluations caused by partial cache data. + - if a flag cannot be found and logs a warning. + - if a requested variation does not match the flag type, the SDK now returns the default variation and logs a warning. (FFM-12192) + +#### Version 1.4.3 + +**Fixed Issues**: +- Resolves an issue where Segmentation faults can occur on Ruby 3.4 and above. (FFM-12192) + #### Version 1.4.2 **Fixed Issues**: @@ -28,6 +71,13 @@ Harness deploys changes to Harness SaaS clusters on a progressive basis. This me invalid metric events shortly after the SDK has initialised. This patch includes possible fixes for this issue. (FFM-12192) +#### Version 1.4.1 + +**Fixed Issues**: + - Skips processing invalid metrics if they are detected. We are currently investigating an edge case in the SDK, where very large projects can generate + invalid metric events shortly after the SDK has initialised. This patch release ensures these invalid metrics events are skipped, and a warning is logged + if the SDK encounters them. The impact of flag evaluation metrics will not include any events that have been skipped. (FFM-12192) + ## October 2024 ### Relay Proxy diff --git a/release-notes/helm-chart-provenance.md b/release-notes/helm-chart-provenance.md new file mode 100644 index 00000000000..69cda00fd06 --- /dev/null +++ b/release-notes/helm-chart-provenance.md @@ -0,0 +1,79 @@ +--- +title: Harness Helm Charts Provenance +sidebar_label: Harness Helm Charts Provenance +date: 2025-01-09T23:00 +sidebar_position: 17 +--- + +# Harness Helm Charts Provenance + +Harness Helm charts are now signed to ensure they are secure and trustworthy. + +Starting with version [0.24.0](../release-notes/self-managed-enterprise-edition.md), you can verify the integrity and origin of the charts using GPG keys with Helm's provenance feature. + +## How to Verify Signed Helm Charts + +### Step 1: Install GnuPG +First, ensure you have GnuPG installed to handle the GPG keys. + +```bash +apk add --no-cache gnupg +``` + +### Step 2: Import the GPG Public Key +Import the Harness public key used to sign the charts. This key will be used to verify the signature. + +```bash +gpg --keyserver hkps://keys.openpgp.org --receive-keys '6117ED4CA5F4605DBF4353F41F6E943934E6D138' +``` + +### Step 3: Convert Keyring to Legacy Format +Convert the GPG keyring to a legacy format to be recognized by Helm provenance verification. + +```bash +gpg --export >~/.gnupg/pubring.gpg +gpg --export-secret-keys >~/.gnupg/secring.gpg +``` + +### Step 4: Verify the Helm Chart +Helm charts can be verified by downloading the chart or pulling it from the Helm repository. + +#### a. 
Verify Downloaded Chart +Download the Helm chart and its corresponding provenance file from the GitHub releases page (`*.tgz` and `*.tgz.prov` under Assets). + +```bash +helm verify harness-0.24.0.tgz +``` + +#### b. Verify Using Helm Repository +1. Add the Harness Helm repository: + + ```bash + helm repo add harness https://harness.github.io/helm-charts/ + ``` + +2. Update the Helm repository: + + ```bash + helm repo update + ``` + +3. Pull the chart and verify it with the specified chart version: + + ```bash + helm pull --verify harness/harness --version=0.24.0 + ``` + +### Step 5: Successful Verification +A successful verification will display output similar to the following: + +```plaintext +Signed by: Harness Inc. (Main key for Helm chart signing) +Using Key With Fingerprint: 6117ED4CA5F4605DBF4353F41F6E943934E6D138 +Chart Hash Verified: sha256:a1af3a0b8b54050070e15953c1c964a595720be2640c59fb2df947c259d18247 +``` +*** + +:::info Additional Information +For more details on Helm chart signing and verification, please refer to the [official Helm documentation](https://helm.sh/docs/topics/provenance/). +::: \ No newline at end of file diff --git a/release-notes/platform.md b/release-notes/platform.md index 81177daf367..cad9952a9d6 100644 --- a/release-notes/platform.md +++ b/release-notes/platform.md @@ -77,6 +77,45 @@ The following deprecated API endpoints are longer supported: - POST api/resourcegroup/filter - GET api/resourcegroup +## January 2025 +### Version 1.71.x +#### Fixed issues + +- On the RoleDetailsPage, permissions are now sorted alphabetically to enhance ease of discovery and navigation, replacing the previous random order. (PL-59350) + +- Fixed the pagination response for the User Group List API. The User Groups GET API now correctly returns the total pages and page count. [Learn more](https://apidocs.harness.io/tag/User-Group#operation/getUserGroupList) (PL-59214) + +- Fixed an issue where navigating via URL to view a specific file loaded the correct line initially but later displayed an incorrect line number. The file and line number now remain consistent. (PL-59002) + +- Added ng-manager virtual service routes in the chart to support GitX webhook APIs at both org and project levels. (PL-58990) + +- Fixed an issue where the 'Apply' button remained enabled and caused an error when clicked, even with no changes to assignments. The 'Apply' button is now correctly disabled when there are no changes. (PL-58782) + +- Updated the text input to accept only the domain instead of the full email. Placeholder changed from 'Eg: abc@harness.io' to 'Eg: harness.io'. (PL-57883) + +- Added search functionality to the Dashboard's Resource Groups modal for improved navigation. (PL-57783) + +#### New features and enhancements + +- "Added a feature on the delegate list page to allow customers to easily differentiate polling mode delegates. This feature is available behind the flag `PL_SHOW_IF_POLLING_MODE_DELEGATE`." (PL-58853) + +- Support has been added to configure either 'Force Delete without recovery' or a 'Recovery Window' when creating or updating the AWS Secret Manager Connector. These settings will be used in the delete secret request and work as follows: + + 1. **Force Delete without recovery**: When enabled, the request to AWS will immediately delete the secret without recovery options. + 2. **Recovery Window (in days)** (optional): If set, the deletion request will be made to AWS with the specified recovery window. 
The secret will be deleted in Harness but can still be recovered in AWS Secret Manager within the recovery window. (PL-58225)
+
+- All delegates in CG will now start disconnecting for customers who are not using CG. (PL-59298)
+
+- Added support for expressions (webhook URL, custom header value, etc.) in the values of webhook headers in the Centralized Notification System. (PL-58248)
+
+- Minio image has been updated to version 2024.9.22-debian-12-r1. (PL-59170)
+
+- Updated the TimescaleDB chart to use pg13.16-ts2.15.3-oss to reduce vulnerabilities. (PL-58116)
+
+- Upgraded the `org.asynchttpclient_async-http-client` library to version 3.0.1. (PL-59246)
+
+- Upgraded the delegate base image from `ubi8-minimal:8.10` to `ubi9-minimal:9.4`. (PL-58376)
+
## December 2024
### Version 1.69.x
#### Fixed issues

diff --git a/release-notes/security-testing-orchestration.md b/release-notes/security-testing-orchestration.md
index d42802b4e96..4380c0135a0 100644
--- a/release-notes/security-testing-orchestration.md
+++ b/release-notes/security-testing-orchestration.md
@@ -22,6 +22,19 @@ These release notes describe recent changes to Harness Security Testing Orchestr

:::

+## January 2025
+
+### Version 1.122.1
+
+
+
+#### New Features and Enhancements
+- Enhanced the Veracode step in STO to use an existing Veracode application with the Target name specified during the step configuration. If no matching application exists, the step creates a new one. (STO-8359)
+
+#### Fixed Issues
+- Fixed an issue where Trivy scans failed if no vulnerabilities were found but secrets were detected; the scan now passes successfully, reporting detected secrets with no vulnerabilities (STO-8404, ZD-75298).
+- Fixed an issue that prevented the creation of a new Veracode Sandbox from the STO Veracode step (STO-8377, ZD-72535).
+
## December 2024

### Version 1.120.4

diff --git a/release-notes/self-managed-enterprise-edition.md b/release-notes/self-managed-enterprise-edition.md
index 0fde4b2b3da..9b2df3849bc 100644
--- a/release-notes/self-managed-enterprise-edition.md
+++ b/release-notes/self-managed-enterprise-edition.md
@@ -59,19 +59,25 @@ upgrades:

### Breaking change - Ingress

-:::danger important upgrade instructions for versions 0.17.x and above
+:::danger important upgrade instructions for versions 0.17.x, 0.18.x, 0.19.x, 0.20.x, 0.21.x, 0.22.x, 0.23.x and 0.24.x

When upgrading to SMP versions 0.17.x and above, the installation may fail if you have any admission webhooks configured for Ingress that do not permit Ingress objects with different names but identical paths. To prevent installation issues, please follow these steps before proceeding with the upgrade:
-
- 1. Run the `update-ingress-objects.sh` script.
-
- 2. The script will prompt you to enter the namespace where Harness is installed.
-
- 3. You will then be asked to provide the version you are upgrading to. For instance, if you are upgrading to Harness 0.21.0, you should input 0.21.0.
-
- 4. The script will automatically update the Ingress objects as needed.
-You can find the script in the Harness 0.21.0 release charts at `scripts/update-ingress-objects.sh`, or you can run it directly from this URL: [update-ingress-objects.sh](https://raw.githubusercontent.com/harness/helm-charts/main/src/harness/scripts/update-ingress-objects.sh).
+ 1. Download the `update-ingress-objects.sh` script from this URL: [update-ingress-objects.sh](https://raw.githubusercontent.com/harness/helm-charts/main/src/harness/scripts/update-ingress-objects.sh).
+ ```
+ # Using curl
+ curl -s https://raw.githubusercontent.com/harness/helm-charts/main/src/harness/scripts/update-ingress-objects.sh -o update-ingress-objects.sh
+ chmod +x update-ingress-objects.sh
+ ./update-ingress-objects.sh
+ ```
+
+ 2. The script will prompt you to enter the namespace where Harness is installed.
+
+ 3. You will then be asked to provide the version you are upgrading to.
+ For instance, if you are upgrading to Harness 0.21.0, you should input 0.21.0.
+ 4. For versions 0.21.x and above, you will be asked to enter the release name as well. You can check the release name by running `helm ls -n $NAMESPACE`.
+
+ 5. The script will automatically update the Ingress objects as needed.

Note: Ensure you have access to the Kubernetes cluster where Harness is running and have the necessary permissions to GET, DELETE, and APPLY Ingress objects.
:::
@@ -220,6 +226,282 @@ To fix this issue, follow these steps

By doing this, you ensure that the same lookerMasterKey is used during upgrades, avoiding encryption issues.
:::

+:::info Harness Helm Chart Provenance
+
+Harness Helm charts are now signed to ensure they are secure and trustworthy. Click [here](../docs/self-managed-enterprise-edition/install/helm-chart-provenance) to learn more.
+
+:::
+
+## Jan 07, 2025, Version 0.24.0
+
+This release includes the following Harness module and component versions.
+
+| **Name** | **Version** |
+| :-- | :--: |
+| Helm Chart | [0.24.0](https://github.com/harness/helm-charts/releases/tag/harness-0.24.0) |
+| Air Gap Bundle | [0.24.0](https://console.cloud.google.com/storage/browser/smp-airgap-bundles/harness-0.24.0) |
+| NG Manager | 1.68.4 |
+| CI Manager | 1.57.5 |
+| Pipeline Service | 1.107.4 |
+| Platform Service | 1.46.1 |
+| Access Control Service | 1.68.0 |
+| Delegate | 24.11.84503 |
+| GitOps Service | 1.20.6 |
+| Change Data Capture | 1.39.2 |
+| STO Core | 1.120.4 |
+| Test Intelligence Service | 1.42.1 |
+| NG UI | 1.54.4 |
+| LE NG | 1.5.6 |
+| Looker | 1.7.0 |
+| Log Service | 1.14.6 |
+
+#### Alternative air gap bundle download method
+
+Some admins might not have Google account access to download air gap bundles. As an alternative, you can use `gsutil`. For `gsutil` installation instructions, go to [Install gsutil](https://cloud.google.com/storage/docs/gsutil_install) in the Google Cloud documentation.
+
+```
+gsutil -m cp \
+  "gs://smp-airgap-bundles/harness-0.24.0/ccm_images.tgz" \
+  "gs://smp-airgap-bundles/harness-0.24.0/cdng_images.tgz" \
+  "gs://smp-airgap-bundles/harness-0.24.0/ce_images.tgz" \
+  "gs://smp-airgap-bundles/harness-0.24.0/code_images.tgz" \
+  "gs://smp-airgap-bundles/harness-0.24.0/ci_images.tgz" \
+  "gs://smp-airgap-bundles/harness-0.24.0/ff_images.tgz" \
+  "gs://smp-airgap-bundles/harness-0.24.0/platform_images.tgz" \
+  "gs://smp-airgap-bundles/harness-0.24.0/sto_images.tgz" \
+  .
+```
+
+### New features and enhancements
+
+#### Harness Platform
+
+- Library upgrades done on Harness Platform
+<details>
+<summary>Show details</summary>
+
+ - Upgraded `io.netty_netty-common` to version 4.1.115. [PL-58550]
+
+ - Upgraded `protobuf-java` to version 3.25.5. [PL-57351]
+
+ - Upgraded Java to version `17.0.11_9-jre-ubi9-minimal` in the delegate. [PL-55499]
+
+ - Upgraded Spring Framework libraries from version 6.0.23 to 6.1.14. [PL-58254]
+
+ - Upgraded `com.nimbusds_nimbus-jose-jwt` to version 9.37.3. [PL-51347]
+
+</details>
+
+- Added a feature flag to hide the delegate image update UI banner. [PL-58691]
+
+- Removed the `connected-ratio-with-primary` and `connected-delegate-ratio` APIs, and updated the `delegate/rings` API response to return the immutable delegate version instead of the legacy delegate version. [PL-57518]
+
+- We now support updating AWS secrets via the "secretsmanager:PutResourcePolicy" permission. [PL-58652]
+
+- Users can now be granted access to specific variables, instead of only having access to all or no variables. By adding specific variables to a resource group, we can use role assignments to control access to those variables. [PL-58269]
+
+- We’ve added the ability to restrict access to specific variables. Users can now be granted access to only certain variables by adding them to a resource group and using role assignments. [PL-58267]
+
+- OIDC connections are now supported for AWS Secret Manager via Platform. [PL-58133]
+
+- Changed the base image from `redhat/ubi8-minimal:8.8` to `redhat/ubi8-minimal:8.10`. [PL-58062]
+
+<details>
+<summary>Image update details</summary>
+
+ 1. delegate
+ 2. delegate-minimal
+ 3. ci-addon
+ 4. lite-engine
+</details>
+ +- Removed delegate & watcher JARs and client tools from the delegate proxy. [PL-58052] + +- Added a new query parameter `scope` to the existing `listDelegates` endpoint. [PL-57724] + - If `scope` is set to true, it will list all delegates, including those in hierarchical scope. + - By default, `scope` is false. + +#### Chaos Engineering + +- Extends ChaosGuard conditions for node-level chaos experiments. (CHAOS-6788) + +- Adds [advanced settings](https://developer.harness.io/release-notes/chaos-engineering#version-1491) to the UI for the "edit infrastructure" page. (CHAOS-6718) + +- Adds local and UTC times in the cron schedule next run. (CHAOS-6974) + +- Adds a pre-check to the Windows global blackhole experiment to verify if the firewall is enabled for the target Windows VM. If not, the `ENABLE_FIREWALL` tunable is set, which, by default, enables the firewall. (CHAOS-6848) + +- Introduces the Windows disk fill chaos experiment, supported by Windows chaos infrastructure. (CHAOS-6842) + +#### Security Testing Orchestration + +- Veracode step is now available as an SCA scanner (STO-8275). + +- Added RBAC to prevent users from editing Ticket Settings. Previously, they could attempt to edit and were blocked by the API, the error message is also enhanced. (STO-7567) + +#### Cloud Cost Management + +- Budget Amount History in Drilldown Chart: The budget drilldown chart now accurately preserves the history of budget amount changes, improving visibility into budget adjustments. [CCM-19494] + +- AWS Payer Account ID Field Added: We’ve introduced the AWS Payer Account ID in the AWS fields dropdown for Perspectives and Cost Categories Rules. Only account IDs will be shown, excluding account names. [CCM-19843] + +- Fixed ECS Metrics Chart Issue: CPU recommendations in ECS metrics charts previously displayed incorrect graphs, and CPU usage showed as 0. This has now been corrected. [CCM-19715] + +### Fixed issues + +#### Harness Platform + +- Dynamic secret references now support dots in the secret name, not just for specifying scope like `account.`. The limitation has been removed. [PL-58771] + +- Fixed a bug in the Harness API docs to allow "application/json" as the ContentType for LDAP APIs. [PL-58671] + +- Fixed an issue where the 'View Details' button in MS Teams notifications created a broken URL, causing a 404 error. [PL-58643] + +- Fixed a Null Pointer Exception (NPE) when acquiring delegate tasks in polling mode with the FF - RECOMPUTE_ELIGIBLE_DELEGATES_LIST enabled. [PL-58573] + +- Added support for runtime and default/allowed values in the Custom Secret Manager connector. [PL-58460] + +- Individual audit events now follow the access policy of the entire audit trail. This means that if a user can view the audit trail, they can also view the details of individual audit events within it. [PL-58111] + +- In the Resource Group Details page, selecting "All" hides the radio button sets (All, By `{Tag, Type}`, Specified) and the `+` Add button from the resource card of every type. [PL-58108] + +- Enhanced delegate task logging by adding the delegate's hostname for better traceability and clarity in task response logs. [PL-58095] + +- Disabled the Plans page UI behind a feature flag. [PL-58059] + +- Fixed a bug in the List Users in User Group API to correctly filter and list only users belonging to the user group when using user email filters. [PL-58049] + +- The "/.wellknown/jwks" endpoint for OIDC now exposes the "alg" value as "RS256" instead of "RSA256". 
[PL-58029]
+
+- Fixed an issue on the EventSummary link under AuditLogs where a new, unhandled action caused the page to break. It now displays "N/A," similar to the AuditLogs page. [PL-57850]
+
+- In the latest update, we've improved access control for listing secrets across scopes. Users can now list secrets at PROJECT, ACCOUNT, ORG, or ALL scopes. Previously, listing secrets across ALL scopes included inaccessible ones. This has been fixed, so users will only see secrets they have permission to access. [PL-57808]
+
+- The OIDC auth method has been added to the AWS connector. Support for it was previously missing in the audit streaming flow, but it is now included, allowing AWS connectors with OIDC to stream audits. [PL-57718]
+
+- The delegate name is now shown in the UI when a connector test fails, if the validation task was acquired by a delegate. This improvement provides better visibility into which delegate handled the task during troubleshooting. [PL-56483]
+
+#### Security Testing Orchestration
+
+- Fixed a configuration issue in some Harness production environments that prevented STO steps from running in IaCM stages (STO-8291, ZD-73770).
+
+- Fixed a bug in the External Tickets page where deselecting a connector would succeed, but the UI did not update properly. (STO-8200)
+
+#### Supply Chain Security
+
+- Fixed an issue where attestation with Cosign using a secret manager did not work with account-level and org-level vault connectors; it now works with both. (SSCA-2955)
+
+- Fixed an issue where adding the SLSA verification step in the same stage as the artifact creation using Docker build and push resulted in duplicate artifact entries with the same name and tag in the execution history. (SSCA-2949)
+
+#### Chaos Engineering
+
+- Fixed the cron experiment execution that was not working with Linux and Windows infrastructure (CHAOS-7044)
+
+- Fixed the issue of **Visual** and **YAML** tabs overlapping while trying to toggle between them in the **Condition Editor** in ChaosGuard. (CHAOS-7026)
+
+- Fixed the input mechanism for specifying multiple zones for CLB AZ down chaos fault, now allowing comma-separated values for multiple inputs. (CHAOS-6909)
+
+- Fixed an issue with the bulk update experiment selection checkbox not de-selecting after updating a cron job. (CHAOS-6856)
+
+- Fixed the error occurring when performing multiple actions on experiments, such as pushing to a custom ChaosHub, adding to GameDay, and executing the experiments consecutively. (CHAOS-6568)
+
+- Fixed the UI issue that prevented a GameDay execution after the stakeholder approval. (CHAOS-6467)
+
+- Fixed the issue where the **Application Maps** drawer was not displayed on the first page due to pagination issues. (CHAOS-6407)
+
+- Fixed the visibility issue of the status display for the Enterprise ChaosHub in dark mode. (CHAOS-5970)
+
+- Fixed the issue where two continuous command probes with a short polling duration could not execute in Linux infrastructure. (CHAOS-5461)
+
+#### Continuous Delivery
+
+- Users were not able to provide the timeout for the ServiceNow approval step as an input. This issue is fixed now. (PIPE-23742, ZD-73247)
+
+- Previously, the Shell Script Provisioner step for PDC infrastructure failed when using runtime inputs for `hostAttributes`. This issue is fixed now. (CDS-104659)
+
+- When using the blue green deployment step in a template, the AWS load balancer dropdown was not getting populated. This issue is fixed now.
(CDS-104478, ZD-73560)
+
+- Previously, when selecting certain templates in My Organization Templates, the UI was not rendering properly. This issue is fixed now, and templates now load and display correctly. (CDS-103675, ZD-73250)
+
+- During Azure web deployments for Windows, complete logs were not shown. This happened due to a library upgrade for Azure that caused issues with some parts of the Azure integration. The issue is fixed now. (CDS-103358, ZD-73664)
+
+- Previously, approver inputs in multi-approval scenarios displayed only the last approval activity's response, leading to incorrect default values and missing input constraints. The issue is fixed. (CDS-103109)
+
+- Previously, attempting to delete folders in the file store with names similar to other folders caused errors, even when the folder appeared to have no references. For example, deleting a folder would fail if another folder with a similar prefix contained referenced entities. This issue is fixed now. (CDS-103076, ZD-72658)
+
+- Previously, URLs used to fetch artifacts from Artifactory with artifact filters did not support special characters, such as spaces, in artifact names. This issue is now fixed to automatically encode URLs, allowing support for special characters. This change is behind FF `CDS_ENCODE_API_REQUESTS`. Please contact [Harness support](mailto:support@harness.io) to enable this feature. (CDS-103041, ZD-69919)
+
+- Previously, when a canary pod had no data while the primary pod did, the risk should have been marked as "No Data" per [documentation](/docs/continuous-delivery/verify/cv-results/interpret-metric-results#metrics-summary-section). Instead, it was incorrectly marked as "No Analysis," causing issues when using the "Fail on No Analysis" feature. This issue is now fixed to align with the documentation, distinguishing "No Data" from "No Analysis" in risk assessments. (CDS-102865, ZD-72242)
+
+- Previously, the runtime input regex for the version field in the Google Artifact Registry (GAR) artifact source was not working as expected, causing all tags to display instead of filtering based on the regex. This issue is fixed now, and the version field now correctly supports regex patterns, allowing users to filter tags as intended. (CDS-102800, ZD-72658)
+
+- Previously, GitOps steps like UpdateReleaseRepo, MergePR, and RevertPR were delayed due to locking on the tokenRef to prevent GitHub rate limits. A new Disable `Git Restraint` option now allows users to bypass this locking for faster execution. (CDS-101882, ZD-71430,72936)
+
+- The custom approval step was taking long to complete. The issue is fixed now. (CDS-104513, ZD-73810,74226,74360)
+
+- User encountered a pipeline failure after enabling the CDS_K8S_CUSTOM_YAML_PARSER feature and using a YAML manifest compatible with Kubernetes Java SDK version 21.x.x, due to a YAML parsing error. The issue is fixed now. (CDS-104066)
+
+- Fixed an issue in the Approver Allowed modal where changes to parent Formik values caused unintended reference changes in the nested Formik due to shallow spreading of form data. The issue was resolved by ensuring stable references for Formik data. (CDS-103728, ZD-73396)
+
+- In Azure function deployments, instance sync was not reporting the instance count. This issue is fixed now. (CDS-103224)
+
+- During service propagation, the failure strategy was not getting reflected. The issue is fixed now. (CDS-103157)
+
+- Fixed an issue where the Approval Input Validation Error displayed the placeholder value instead of the actual value.
(CDS-103110) + +- Fixed an issue in TerraformCloudRun where the pipeline would get stuck on "Apply" when there were no changes in the plan. The Apply section now correctly identifies that no changes are present and skips execution accordingly, improving the customer experience. (CDS-103088, ZD-72114) + +- Fixed an issue where GitOps Cluster information was not being populated in the YAML, causing failures during pipeline execution. The GitOps Cluster details now sync correctly between the UI and YAML, ensuring seamless pipeline runs. (CDS-102910, ZD-72196) + +- Fixed an issue where artifact versions containing special characters, such as +, could not be fetched from Nexus during deployment. The issue occurred because these characters were not properly encoded in the URI. The implementation now ensures correct encoding, enabling successful artifact retrieval. (CDS-102807, ZD-70660) + +- Fixed an issue in the "Allowed Values" dropdown menu of the manual approval step where the dropdown for the last variable in the list did not fully expand, preventing users from viewing or selecting values. The dropdown menu now adjusts its position based on available space and is fully visible to users. (CDS-102638) + +- Enhanced logging and error messaging for the Terraform Apply stage in CD pipelines. If the Terraform binary is missing, the error message now clearly states: "Terraform binary not found. Please ensure Terraform is properly installed on the delegate." This improvement provides better guidance and visibility throughout the Terraform Apply process, enabling faster troubleshooting. (CDS-101932) + +- Fixed an issue where adding nodeName for Matrix in a Run step within a step group caused the pipeline to fail with a "NullPointerException." This issue occurred when using a strategy configured with node names inside a containerized step group. The fix allows users to successfully apply matrix and other strategy features within containerized step groups. (CDS-101511, ZD-70531,70626,71052) + +- Fixed an issue where the rollback process failed due to an incorrect branch reference for the YAML file. The system previously defaulted to the main branch when retrieving the YAML file during rollback, even if the file was stored in a different branch. The issue has been fixed by adding the necessary logic to include Git details during post-deployment rollbacks, ensuring that the correct branch is referenced. This fix is behind the feature flag `CDS_ADD_GIT_INFO_IN_POST_DEPLOYMENT_ROLLBACK`. Please contact [Harness support](mailto:support@harness.io) to enable this feature. (CDS-101504, ZD-70935) + +- Fixed an issue with the DockerHub connector where the URL field caused confusion for users. The tooltip suggested two different URLs, but only https://index.docker.io/v2/ worked for pushing images. The connector now defaults to https://index.docker.io/v2/ for new connections, allowing customers to override it only if they are using a private repository. This improves usability and eliminates errors during image push attempts. (CDS-99997) + +- Fixed an issue where the "Blue" environment was deleted before the "Green" environment was fully verified in a Blue/Green deployment. More validations have been added for active applications during the deployment process for TAS Blue/Green, ensuring that the active instance is not deleted until the Green environment is fully deployed and ready to take traffic. This enhancement prevents deployment failures and ensures reliable, downtime-free Blue/Green deployments. 
(CDS-101399, ZD-70706)
+
+#### Continuous Integration
+
+- Optimized the way environment variables are injected into build pods, reducing the YAML size to address Kubernetes resource configuration limits. (CI-15245, ZD-71872)
+
+- Fixed an issue where CI build credits were not getting published for aborted pipelines. (CI-15215)
+
+- Resolved an issue where Cache Intelligence in self-hosted builds wasn't working properly when the 'paths' field was specified. (CI-15201, ZD-73305)
+
+- Resolved an issue with ECR image links in the artifacts tab for the "Build and Push to ECR" step by adding the missing "/_" separator, ensuring correct functionality. (CI-15089, ZD-72329)
+
+- Improved secret error debugging for pipeline variables - when referencing a non-existent secret in a pipeline variable, the error message now provides actionable details to help debug, rather than a generic exception. (CI-15013)
+
+- Resolved an issue where `EnvToSecretMap` was being overwritten by OIDC parameters, causing failures in the VM flow for GCP uploads. (CI-14952)
+
+- Corrected the artifact URL output in the "Build and Push to GAR" step to ensure the published image URL is formatted correctly. (CI-14917, ZD-71930)
+
+- Updated Alpine image version to address security vulnerabilities in images `plugins/gcs:1.6.0` and `plugins/artifact-metadata-publisher:2.0`. (CI-14897, ZD-71880)
+
+- Resolved an issue where OPA enforcement in CI stages didn't work properly in the Self-Managed Enterprise Edition. (CI-14840, ZD-70943)
+
+- Fixed an issue where Bitbucket tag builds with tags containing slashes were causing errors in execution due to `<+codebase.commitSha>` returning null. Harness now correctly supports tags with slashes for Bitbucket and Git builds, ensuring SHA values are properly referenced. (CI-14706, ZD-70972)
+
+- Addressed an issue where pipelines failed at the clone codebase step on Windows infrastructure when using the GitHub SSH connector and cloning using LFS. (CI-14592, ZD-70570, ZD-71715)
+
+- Fixed an issue where the `DRONE_REPO_OWNER` built-in environment variable pointed to the wrong owner when the CI pipeline was triggered by a tag event. A new feature flag (`CI_DRONE_REPO_OWNER`) has been introduced to ensure `DRONE_REPO_OWNER` is correctly extracted from the repository URL. (CI-14468)
+
+- Resolved an issue to ensure proper functionality for "Upload Artifact to S3" and "Save/Restore Cache to S3" steps when used with an AWS connector configured with an External ID. (CI-14214, ZD-69360)
+
+- Improved "Copy to Clipboard" functionality for pipeline output logs. Previously, extra new lines were added when pasting the copied output, causing unnecessary spacing between lines. This issue has been fixed to ensure log output is pasted without additional line breaks. (CI-14200, ZD-68902)
+
+- Support for Docker Build Secrets in "Build and Push" Steps - You can now configure Docker build secrets in the Build and Push step using YAML. This feature allows specifying secrets via the `envDockerSecrets` and/or `fileDockerSecrets` fields, applicable when running build-and-push steps using Buildx (not Kaniko). Note that using Buildx in Kubernetes build infrastructure requires privileged access.
+
+#### Cloud Cost Management
+
+- GCP Missing Current Daily Costs: We have handled the new invoice.publisher_type column in the GCP Billing export to ensure costs now appear correctly in Perspectives. [CCM-20214]
+
## December 19, 2024, patch version 0.23.3

This release includes the following Harness module and component versions.
diff --git a/release-notes/software-engineering-insights.md b/release-notes/software-engineering-insights.md
index 212bfebf7ac..f5d37a5de30 100644
--- a/release-notes/software-engineering-insights.md
+++ b/release-notes/software-engineering-insights.md
@@ -24,6 +24,29 @@ These release notes describe recent changes to Harness Software Engineering Insi

## December 2024

+### Version 202412.2
+
+
+
+#### Early access features
+
+* We have improved the integrations landing page by introducing categorized sections, simplifying navigation and making it easier for users to quickly discover and configure the integrations. (SEI-9804)
+
+#### New features and enhancements
+
+* We have implemented improved logic for calculating key sprint metrics. The updates include the following:
+  * **Delivered scope creep points:** This metric captures the total story points from all completed creep tickets within the sprint, giving you better visibility into scope changes.
+  * **Delivered story creep points:** This represents the sum of story points from completed tickets where estimates were increased during the sprint.
+  * **Velocity points:** The velocity metric is now calculated as the sum of committed points done, delivered scope creep points, and delivered story creep points. Note that this feature is behind a feature flag. Please contact Harness Support to have it enabled for your account.
+* Added performance improvements to the **Collection settings** page, significantly reducing loading time (SEI-9647)
+* Redesigned the [Azure DevOps integration](/docs/software-engineering-insights/sei-integrations/azure-devops/sei-integration-azure-devops) interface to display separate tiles for Azure Boards, Azure Pipelines, and Azure Repos. This helps identify the service easily while maintaining unified configuration capabilities.
(SEI-9795) + +#### Fixed issues + +* The [GitLab integration](/docs/software-engineering-insights/sei-integrations/gitlab/sei-integration-gitlab) now ignores merge commits while ingesting data to prevent duplicate contributions (SEI-9385) (ZD-73141) +* Fixed the issue where incorrect insight was loading when selected from the **Manage Insights** view (SEI-9855) +* Fixed the bug in the Trellis Score report drill down where *Time spent* column incorrectly included resolution state time (SEI-9733) (ZD-74405) (ZD-75651) + ### Version 202412.1 diff --git a/sidebars.js b/sidebars.js index 4d82f90e235..28ba44aa53a 100644 --- a/sidebars.js +++ b/sidebars.js @@ -535,7 +535,63 @@ const sidebars = { description: "Learn how to store artifacts securely.", }, collapsed: true, - items: [{ type: "autogenerated", dirName: "artifact-registry" }], + items: [ + "artifact-registry/whats-supported", + ], + }, + { + type: "html", + value: "New to Artifact Registry?", + className: "horizontal-bar", + }, + "artifact-registry/get-started/overview", + "artifact-registry/get-started/quickstart", + { + type: "html", + value: "Use Artifact Registry", + className: "horizontal-bar", + }, + { + type: "category", + label: "Manage Registries", + link: { + type: "generated-index", + }, + collapsed: true, + items: [ { type: "autogenerated", dirName: "artifact-registry/manage-registries", } ], + }, + { + type: "category", + label: "Manage Artifacts", + link: { + type: "generated-index", + }, + collapsed: true, + items: [ { type: "autogenerated", dirName: "artifact-registry/manage-artifacts", } ], + }, + { + type: "category", + label: "Platform Integrations", + link: { + type: "generated-index", + }, + collapsed: true, + items: [ { type: "autogenerated", dirName: "artifact-registry/platform-integrations", } ], + }, + + { + type: "html", + value: "Troubleshooting", + className: "horizontal-bar", + }, + { + type: "category", + label: "Authorization & Authentication", + link: { + type: "generated-index", + }, + collapsed: true, + items: [ { type: "autogenerated", dirName: "artifact-registry/troubleshooting/authorization", } ], }, // API Docs { diff --git a/src/components/Docs/data/iacmData.ts b/src/components/Docs/data/iacmData.ts index 472fe86cdeb..fea02c0da21 100644 --- a/src/components/Docs/data/iacmData.ts +++ b/src/components/Docs/data/iacmData.ts @@ -91,7 +91,7 @@ export const docsCards: CardSections = [ title: "Drift Detection", module: MODULES.iacm, description: "Learn how to detect and get notified on drift.", - link: "/docs/infra-as-code-management/pipelines/operations/drift-detection", + link: "/docs/infra-as-code-management/use-iacm/drift-detection", }, { title: "Workspace Permissions and Access Control", diff --git a/src/components/HomepageFeatures/data/featureListData.tsx b/src/components/HomepageFeatures/data/featureListData.tsx index 3aa9330ff1c..50dba9cf095 100644 --- a/src/components/HomepageFeatures/data/featureListData.tsx +++ b/src/components/HomepageFeatures/data/featureListData.tsx @@ -22,8 +22,7 @@ export const featureList: CardItem[] = [ module: MODULES.ar, icon: "img/icon_artifact_registry.svg", description: "Store your binaries natively on Harness.", - link: "docs/artifact-registry/get-started/overview", - type: [docType.Documentation], + link: "docs/artifact-registry", }, { title: "Set up CD Pipelines", diff --git a/src/components/NavbarItems/CoveoSearch.js b/src/components/NavbarItems/CoveoSearch.js deleted file mode 100644 index c1d3ac230ff..00000000000 --- a/src/components/NavbarItems/CoveoSearch.js 
+++ /dev/null @@ -1,202 +0,0 @@ -/* eslint-disable no-undef */ -import React, { useEffect, useRef, useState } from 'react'; -import { useHistory } from 'react-router-dom'; -import Head from '@docusaurus/Head'; -import './CoveoSearch.scss'; -import BrowserOnly from '@docusaurus/BrowserOnly'; - -const CoveoSearch = () => { - const searchBoxEl = useRef(null); - const searchResultsEl = useRef(null); - const [isCoveoLoaded, setIsCoveoLoaded] = useState(false); - const { - location: { pathname }, - } = useHistory(); - - const initializeCoveo = async () => { - let tokenData = {}; - - const getCoveoToken = async () => { - const rootUrl = window.location.href.split('/').slice(0, 3).join('/'); - try { - const response = await fetch(rootUrl + '/api/coveo_api'); - const data = await response.json(); - const item = { - token: data.token, - orgId: data.id, - expiry: Date.now() + 12 * 60 * 60 * 1000, // 12hrs from now - }; - localStorage.setItem('coveo_token', JSON.stringify(item)); - return item; - } catch (error) { - console.error('Error fetching Coveo token:', error); - } - }; - - const loadCoveoScript = () => { - return new Promise((resolve, reject) => { - const script = document.createElement('script'); - script.src = - 'https://static.cloud.coveo.com/searchui/v2.10094/js/CoveoJsSearch.min.js'; - script.async = true; - script.onload = resolve; - script.onerror = reject; - document.head.appendChild(script); - }); - }; - - const initializeSearch = async () => { - const storedToken = localStorage.getItem('coveo_token'); - if (storedToken) { - const data = JSON.parse(storedToken); - if (data.expiry <= Date.now()) { - tokenData = await getCoveoToken(); - } else { - tokenData = data; - } - } else { - tokenData = await getCoveoToken(); - } - - // Check if tokenData is missing or invalid - if (!tokenData) { - console.error('Error initializing Coveo: Missing token or orgId'); - return; - } - - // Proceed with initializing Coveo if window.Coveo is defined - if (window.Coveo) { - Coveo.SearchEndpoint.configureCloudV2Endpoint( - tokenData.orgId, - tokenData.token - ); - // Coveo.SearchEndpoint.endpoints.default.caller.options.queryStringArguments.debug = 1; - let searchboxRoot = searchBoxEl.current; - let searchRoot = document.createElement('div'); - searchRoot.setAttribute('class', 'coveo-search-results'); - searchRoot.setAttribute('style', 'display: none;'); - - const elemSearchResultConainer = searchResultsEl.current; - if (elemSearchResultConainer) { - elemSearchResultConainer.appendChild(searchRoot); - } - - searchboxRoot.innerHTML = ` -
-
-
- `; - searchRoot.innerHTML = ` - `; - - const coveoRoot = searchRoot.querySelector('#coveo-search'); - Coveo.init(coveoRoot, { - externalComponents: [searchboxRoot], - }); - - Coveo.$$(coveoRoot).on('doneBuildingQuery', function (e, args) { - let q = args.queryBuilder.expression.build(); - if (q) { - searchRoot.style.display = 'block'; - } else { - searchRoot.style.display = 'none'; - } - }); - - Coveo.$$(coveoRoot).on('afterInitialization', function () { - Coveo.state(coveoRoot, 'f:@commonsource', ['Developer Hub']); - }); - } else { - console.error('Coveo script failed to load.'); - } - }; - await loadCoveoScript(); - await initializeSearch(); - }; - - useEffect(() => { - if (!isCoveoLoaded) { - initializeCoveo().then(() => setIsCoveoLoaded(true)); - } - }, [isCoveoLoaded, pathname]); - - return ( - Loading...}> - {() => { - const CoveoErrorReport = document.querySelector('.CoveoErrorReport'); - - if (CoveoErrorReport) { - console.log(Boolean(CoveoErrorReport.ariaHidden)); - - if (CoveoErrorReport.ariaHidden) { - localStorage.removeItem('coveo_token'); - initializeCoveo().then(() => setIsCoveoLoaded(true)); - } - } - return ( -
- - - - - {isCoveoLoaded && ( - - )} - -
-
-
- ); - }} -
- ); -}; - -export default CoveoSearch; diff --git a/src/components/NavbarItems/CoveoSearch.scss b/src/components/NavbarItems/CoveoSearch.scss deleted file mode 100644 index 828ebaf1757..00000000000 --- a/src/components/NavbarItems/CoveoSearch.scss +++ /dev/null @@ -1,1118 +0,0 @@ -/* --- Coveo style overwritten ---- */ -#searchBoxCoveo { - width: 250px; - // width: calc(100vw - 1080px); - margin-left: 10px; -} - -#coveo-search-mask { - position: fixed; - background-color: var(--black); - opacity: 0.7; - width: 100vw; - height: 100vh; - left: 0; - top: 0; - z-index: 97; -} -#searchResultsCoveo { - #coveo-search { - background-color: var(--white); - // box-shadow: 0px 0px 1px rgba(40, 41, 61, 0.04), 0px 2px 4px rgba(96, 97, 112, 0.16); - min-width: 635px; - // z-index: 98; - } - .coveo-search-results { - // width: 1000px; - // width: calc(100vw - 890px); - // height: calc(100vh - 70px); - // width: calc(100vw - 30px); - height: calc(100vh - 95px); - position: absolute; - - // left: 15px; - top: 80px; - background-color: var(--white); - overflow-y: auto; - box-shadow: 0px 0px 1px rgba(40, 41, 61, 0.14), - 0px 2px 4px rgba(96, 97, 112, 0.26); - margin-right: 12px; - border-radius: 5px; - right: 0; - width: 70%; - min-width: 800px; - } - .coveo-facet-column { - // display: none; - } - .coveo-dropdown-header-wrapper { - margin-top: 10px; - .coveo-facet-dropdown-header:hover { - color: var(--white); - } - } -} - -.CoveoSearchbox .magic-box { - // border: none; - // border-radius: 4px !important; -} -.CoveoSearchbox .magic-box .magic-box-input > input { - height: 36px; -} -.magic-box .magic-box-input .magic-box-underlay { - height: 36px !important; -} -.magic-box .magic-box-clear { - height: 36px !important; - width: 36px !important; - line-height: 36px !important; -} -.CoveoSearchbox .magic-box .magic-box-input { - background: var(--white); - border-radius: 4px !important; - height: 36px; -} -#header #search-container input, -#main[data-hd-template='barsv3'] #header input { - background-color: #ffffff !important; - border-radius: 25px !important; - color: #22262e !important; - font-size: 16px !important; -} -.magic-box .magic-box-clear { - background: unset; -} -.CoveoSearchbox .CoveoSearchButton { - border: none; - display: flex; - justify-content: center; - align-items: center; - height: unset; - width: 36px; -} -.CoveoSearchbox .CoveoSearchButton:hover .coveo-search-button-svg, -.CoveoSearchbox .CoveoSearchButton:hover .coveo-magnifier-circle-svg { - color: var(--primary-6); - fill: var(--primary-6); -} -#header #search-container.search-responsive span, -#header #search-container span { - margin-right: unset; - margin-top: unset; - float: unset !important; - /* z-index: 1049 !important; */ -} -#header #search-container.search-responsive span.coveo-omnibox-hightlight, -#header #search-container span.coveo-omnibox-hightlight { - margin-right: unset; -} -.coveo-search-button-svg { - // width: 24px; - // height: 24px; - // color: #fff; -} - -.navbar { - z-index: 99; -} - -/*** styles for coveo search UI ***/ -.harness-search-source { - background-color: #fafbfc; - border: 1px solid #d9dae5; - border-radius: 2px; - /* display: block; - */ - /* padding: 2px 1em; - */ - width: fit-content; - text-align: center; - font-weight: 500; - font-size: 12px; - line-height: 16px; - /* identical to box height, or 133% */ - text-align: center; - letter-spacing: 0.2px; - display: flex; - flex-direction: row; - align-items: center; - padding: 4px 8px; -} -.harness-search-source .CoveoFieldValue .coveo-clickable { 
- color: #000; -} -.harness-search-module { - display: flex; - gap: 8px; -} -.harness-search-module .CoveoFieldValue, -.harness-search-module .CoveoFieldValue { - /* Module Colors/Feature Flags Management/100 */ - /* Module Colors/Feature Flags Management/200 */ - background-color: #fafbfc; - border: 1px solid #d9dae5; - border-radius: 2px; - font-weight: 500; - font-size: 12px; - line-height: 16px; - /* identical to box height, or 133% */ - text-align: center; - letter-spacing: 0.2px; - /* Module Colors/Feature Flags Management/300 */ - color: var(--gray-800); - display: flex; - flex-direction: row; - align-items: center; - padding: 4px 8px; - width: fit-content; -} -.harness-search-module .CoveoFieldValue .coveo-clickable { - color: var(--gray-800); -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCI { - background: var(--mod-ci-100); - border: 1px solid var(--mod-ci-200); - color: var(--mod-ci-300); - &::before { - content: url('/img/icon_ci_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCI .coveo-clickable { - color: var(--mod-ci-300); -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCD { - background: var(--mod-cd-100); - border: 1px solid var(--mod-cd-200); - color: var(--mod-cd-300); - &::before { - content: url('/img/icon_cd_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCD .coveo-clickable { - color: var(--mod-cd-300); -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCC { - background: var(--mod-ccm-100); - border: 1px solid var(--mod-ccm-200); - color: var(--mod-ccm-300); - &::before { - content: url('/img/icon_ccm_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCC .coveo-clickable { - color: var(--mod-ccm-300); -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueFF { - background: var(--mod-ff-100); - border: 1px solid var(--mod-ff-200); - color: var(--mod-ff-300); - &::before { - content: url('/img/icon_ff_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueFF .coveo-clickable { - color: var(--mod-ff-300); -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCE { - background: var(--mod-ce-100); - border: 1px solid var(--mod-ce-200); - color: var(--mod-ce-300); - &::before { - content: url('/img/icon_ce_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCE .coveo-clickable { - color: var(--mod-ce-300); -} - -.harness-search-module .CoveoFieldValue.CoveoFieldValueCET { - background: var(--mod-cet-100); - border: 1px solid var(--mod-cet-200); - color: var(--mod-cet-300); - &::before { - content: url('/img/icon_cet_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCET .coveo-clickable { - color: var(--mod-cet-300); -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueSTO { - background: var(--mod-sto-100); - border: 1px solid var(--mod-sto-200); - color: var(--mod-sto-300); - &::before { - content: url('/img/icon_sto_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module 
.CoveoFieldValue.CoveoFieldValueSTO .coveo-clickable { - color: var(--mod-sto-300); -} - -.harness-search-module .CoveoFieldValue.CoveoFieldValueSRM { - background: var(--mod-srm-100); - border: 1px solid var(--mod-srm-200); - color: var(--mod-srm-300); - &::before { - content: url('/img/icon_srm_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueSRM .coveo-clickable { - color: var(--mod-srm-300); -} - -.harness-search-module .CoveoFieldValue.CoveoFieldValueSEI { - background: var(--mod-sei-100); - border: 1px solid var(--mod-sei-200); - color: var(--mod-sei-300); - &::before { - content: url('/img/icon_sei_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueSEI .coveo-clickable { - color: var(--mod-sei-300); -} - -// IDP -.harness-search-module .CoveoFieldValue.CoveoFieldValueIDP { - background: var(--mod-idp-100); - border: 1px solid var(--mod-idp-200); - color: var(--mod-idp-300); - &::before { - content: url('/img/icon_idp_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueIDP .coveo-clickable { - color: var(--mod-idp-300); -} - -// IaCM -.harness-search-module .CoveoFieldValue.CoveoFieldValueIACM { - background: var(--mod-iacm-100); - border: 1px solid var(--mod-iacm-200); - color: var(--mod-iacm-300); - &::before { - content: url('/img/icon_iacm_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueIACM .coveo-clickable { - color: var(--mod-iacm-300); -} - -// SCS -.harness-search-module .CoveoFieldValue.CoveoFieldValueSSCA { - background: var(--mod-ssca-100); - border: 1px solid var(--mod-ssca-200); - color: var(--mod-ssca-300); - &::before { - content: url('/img/icon_ssca_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueSSCA .coveo-clickable { - color: var(--mod-ssca-300); -} - -// Gitness -.harness-search-module .CoveoFieldValue.CoveoFieldValueGitness { - background: var(--mod-code-100); - border: 1px solid var(--mod-code-200); - color: var(--mod-codes-300); - &::before { - content: url('/img/icon_code_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module - .CoveoFieldValue.CoveoFieldValueGitness - .coveo-clickable { - color: var(--mod-code-300); -} - -// Code Repository (CR) -.harness-search-module .CoveoFieldValue.CoveoFieldValueCR { - background: var(--mod-code-100); - border: 1px solid var(--mod-code-200); - color: var(--mod-codes-300); - &::before { - content: url('/img/icon_code_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCR .coveo-clickable { - color: var(--mod-code-300); -} - -// Database DevOps -.harness-search-module .CoveoFieldValue.CoveoFieldValueDBDevOps { - background: var(--mod-cd-100); - border: 1px solid var(--mod-cd-200); - color: var(--mod-cd-300); - &::before { - content: url('/img/icon_dbdevops_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module - .CoveoFieldValue.CoveoFieldValueDBDevOps - .coveo-clickable { - color: var(--mod-cd-300); -} - -// Armory 
-.harness-search-module .CoveoFieldValue.CoveoFieldValueArmory { - background: var(--mod-ci-100); - border: 1px solid var(--mod-ci-200); - color: var(--mod-ci-300); - &::before { - content: url('/img/icon_armory_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueArmory .coveo-clickable { - color: var(--mod-ci-300); -} - -// Cloud Development Environments -.harness-search-module .CoveoFieldValue.CoveoFieldValueCDE { - background: var(--mod-sto-100); - border: 1px solid var(--mod-sto-200); - color: var(--mod-sto-300); - &::before { - content: url('/img/icon_cloud_development_environments_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueCDE .coveo-clickable { - color: var(--mod-sto-300); -} - -// Artifact Registry -.harness-search-module .CoveoFieldValue.CoveoFieldValueAR { - background: var(--mod-ci-100); - border: 1px solid var(--mod-ci-200); - color: var(--mod-ci-300); - &::before { - content: url('/img/icon_artifact_registry_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.harness-search-module .CoveoFieldValue.CoveoFieldValueAR .coveo-clickable { - color: var(--mod-ci-300); -} - -.harness-search-module .CoveoFieldValue.CoveoFieldValuePlatform { - &::before { - content: url('/img/icon_harness_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} -.CoveoResult a.CoveoResultLink, -.CoveoResultLink, -a.CoveoResultLink { - color: #000; - font-weight: 500; - font-size: 18px; - line-height: 28px; -} - -.CoveoResult .CoveoPrintableUri a.CoveoResultLink { - font-weight: 400; - font-size: 14px; - line-height: 18px; -} -.CoveoResult a.CoveoResultLink:hover, -.CoveoResultLink, -a.CoveoResultLink:hover { - color: #0278d5; -} -.CoveoExcerpt { - font-weight: normal; - font-size: 14px; - line-height: 20px; - /* or 143% */ - /* Gray Scale / Gray Scale 600 */ - color: #4f5162; -} -.coveo-checkbox-button, -input[type='checkbox'].coveo-checkbox + button { - /* min-width: 18px; - */ - border: 1px solid #0278d5; - box-sizing: border-box; - border-radius: 4px; -} -.coveo-dynamic-facet-value .coveo-checkbox-span-label { - font-weight: 300; - font-size: 12px; - line-height: 15px; - /* identical to box height */ - display: flex; - align-items: flex-end; - text-align: center; - /* Gray Scale / Gray Scale 800 */ - color: #22222a; -} -.coveo-dynamic-facet-value - .coveo-checkbox-label:hover - .coveo-checkbox-span-label, -.coveo-dynamic-facet-value.coveo-focused .coveo-checkbox-span-label, -.coveo-dynamic-facet-value.coveo-selected .coveo-checkbox-span-label { - opacity: 1; - color: var(--black); - font-weight: 400; -} -.coveo-dynamic-facet-value .coveo-checkbox-span-label-suffix { - font-weight: 300; - font-size: 12px; - line-height: 15px; - /* identical to box height */ - display: flex; - align-items: flex-end; - text-align: center; - /* Gray Scale / Gray Scale 600 */ - color: #4f5162; -} -.coveo-dynamic-facet-header-title { - font-weight: 500; - font-size: 18px; - line-height: 28px; - /* identical to box height, or 156% */ - display: flex; - align-items: flex-end; - color: #000; -} -.coveo-dynamic-facet-header { - border-bottom: 1px solid #f3f3fa; -} -.coveo-checkbox-label { - display: grid; - grid-template-columns: auto auto 1fr; -} -.coveo-dynamic-facet-value .coveo-checkbox-span-label-suffix { - justify-content: flex-end; -} 
-.CoveoSearchInterface .coveo-facet-column { - padding: 13px 20px 5px; -} -.CoveoSearchInterface .coveo-results-column { - padding: 10px 10px 10px 25px; -} -.CoveoQuerySummary, -.CoveoQueryDuration { - color: var(--black); - margin-right: 0.3em; -} -.CoveoSort { - padding: 0 36px 10px; - text-transform: capitalize; - font-weight: 500; - font-size: 14px; - line-height: 24px; - color: #1c1c28; -} -.CoveoSort.coveo-selected, -.CoveoSort.coveo-selected:hover { - border-bottom: 2px solid var(--primary-7); -} -.coveo-result-frame .coveo-result-cell, -.CoveoResult.coveo-result-frame .coveo-result-cell { - font-weight: 300; - /* font-size: 12px; - */ - /* line-height: 15px; - */ - color: var(--gray-800); -} -@media screen and (max-width: 834px) { - .coveo-result-frame .coveo-result-cell, - .CoveoResult.coveo-result-frame .coveo-result-cell { - display: block; - } -} -.coveo-list-layout.CoveoResult { - background: var(--white); - /* Light / Elevation 02 */ - box-shadow: 0px 0px 1px rgba(40, 41, 61, 0.04), - 0px 2px 4px rgba(96, 97, 112, 0.16); - border-radius: 8px; - border-bottom: unset; - margin: 8px auto; - padding: 24px; -} -.coveo-pager-list :first-child { - border-left: unset; -} -.coveo-pager-list :last-child { - border-right: unset; -} -.coveo-pager-list-item { - border: unset !important; - // border-left: 1px solid rgba(2, 120, 213, 0.21); - // border-right: 1px solid rgba(2, 120, 213, 0.21); - font-size: 12px; - background: var(--white); - // box-shadow: 0px 0px 1px #000, 0px 2px 4px #000; - border-radius: unset; - margin-left: -1px; - margin-right: 0; - padding: 4px 10px; - font-weight: 400; -} -.coveo-pager-list-item a { - color: var(--primary-7); - font-weight: 400; -} -.coveo-pager-list-item.coveo-active, -.coveo-pager-list-item:hover { - background-color: var(--primary-7) !important; - color: var(--white) !important; - font-weight: 700; -} -.coveo-pager-list-item.coveo-active a, -.coveo-pager-list-item:hover a { - color: var(--white); - font-weight: 700; -} -.coveo-pager-next, -.coveo-pager-previous { - border: unset; - border-left: 1px solid rgba(2, 120, 213, 0.21); - border-radius: 0 50% 50% 0; - padding: 4px 10px; - margin: 0; - margin-left: -1px; -} -.coveo-pager-next .coveo-pager-next-icon { - padding-left: 4px; - padding-right: 4px; -} -.coveo-pager-next .coveo-pager-next-icon-svg, -.coveo-pager-previous .coveo-pager-next-icon-svg, -.coveo-pager-next .coveo-pager-previous-icon-svg, -.coveo-pager-previous .coveo-pager-previous-icon-svg { - color: var(--primary-7); - width: 6px; - height: 12px; -} -.coveo-pager-next:hover, -.coveo-pager-previous:hover, -.coveo-pager-next:active, -.coveo-pager-previous:active, -.coveo-pager-next a:hover, -.coveo-pager-previous a:hover, -.coveo-pager-next a:active, -.coveo-pager-previous a:active { - color: var(--primary-7); - background-color: var(--primary-7); -} -.coveo-pager-next:hover .coveo-pager-next-icon-svg, -.coveo-pager-previous:hover .coveo-pager-next-icon-svg, -.coveo-pager-next:active .coveo-pager-next-icon-svg, -.coveo-pager-previous:active .coveo-pager-next-icon-svg, -.coveo-pager-next a:hover .coveo-pager-next-icon-svg, -.coveo-pager-previous a:hover .coveo-pager-next-icon-svg, -.coveo-pager-next a:active .coveo-pager-next-icon-svg, -.coveo-pager-previous a:active .coveo-pager-next-icon-svg, -.coveo-pager-next:hover .coveo-pager-previous-icon-svg, -.coveo-pager-previous:hover .coveo-pager-previous-icon-svg, -.coveo-pager-next:active .coveo-pager-previous-icon-svg, -.coveo-pager-previous:active .coveo-pager-previous-icon-svg, 
-.coveo-pager-next a:hover .coveo-pager-previous-icon-svg, -.coveo-pager-previous a:hover .coveo-pager-previous-icon-svg, -.coveo-pager-next a:active .coveo-pager-previous-icon-svg, -.coveo-pager-previous a:active .coveo-pager-previous-icon-svg { - color: var(--white); -} -.coveo-pager-previous { - border-radius: 50% 0 0 50%; - border: unset; - border-right: 1px solid rgba(2, 120, 213, 0.21); -} -.CoveoResultsPerPage { - font-size: 12px; -} -.CoveoResultsPerPage :first-child { - border-left: unset; -} -.CoveoResultsPerPage :last-child { - border-right: unset; -} -.coveo-results-per-page-list-item { - border: unset !important; - // border-left: 1px solid rgba(2, 120, 213, 0.21); - // border-right: 1px solid rgba(2, 120, 213, 0.21); - color: var(--primary-7); - font-size: 12px; - background: var(--white); - padding: 4px 10px; - margin: 0; - margin-left: -1px; - // box-shadow: 0px 0px 1px #000, 0px 2px 4px #000; -} -.coveo-results-per-page-list-item.coveo-active, -.coveo-results-per-page-list-item:hover, -.coveo-results-per-page-list-item:active { - color: var(--white) !important; - background-color: var(--primary-7) !important; - font-weight: 600; -} -.coveo-results-per-page-text { - margin-right: 0.7em; -} -.coveo-result-frame { - position: relative; -} -.coveo-result-cell-image { - width: 300px; - display: table-cell; -} -.coveo-result-cell-image img { - width: 280px; - height: auto; - max-height: 110px; - position: absolute; - right: 0; - bottom: 0; -} -@media screen and (max-width: 834px) { - .coveo-result-cell-image img { - position: unset; - margin-top: 8px; - } -} -.CoveoLogo { - display: none; -} -.coveo-results-header { - box-shadow: unset; -} -.CoveoSort { - border-bottom: 1px solid var(--primary-1); -} -.coveo-featured-result-badge, -.coveo-recommended-result-badge { - color: var(--primary-7); - text-transform: capitalize; - padding: 5px 0; - /* workaround for moving recommended badge to beside the @source tag */ - position: absolute; - transform: translate(13em, -5px); - z-index: 2; -} -.CoveoSearchInterface.coveo-small-facets .coveo-facet-dropdown-header { - border-radius: 4px; - text-transform: capitalize; - padding: 3px 10px; - height: unset; - line-height: 14px; -} -.CoveoSearchInterface.coveo-small-facets .coveo-facet-dropdown-header:hover { - background-color: rgba(1, 120, 213, 10); -} -.CoveoBreadcrumb { - border-bottom: unset; -} -.CoveoBreadcrumb .coveo-breadcrumb-items { - padding-bottom: 0; -} -.coveo-dynamic-facet-header-btn, -.coveo-dynamic-facet-breadcrumb-collapse, -.coveo-dynamic-facet-breadcrumb-value, -.coveo-breadcrumb-clear-all { - color: var(--primary-7); -} -.CoveoMissingTerms .coveo-clickable { - color: var(--primary-7); -} -.coveo-result-frame .coveo-result-row, -.CoveoResult.coveo-result-frame .coveo-result-row { - position: relative; -} - -// body:not([data-article-id*='undefined']):not([data-category-id='undefined']) h1, body:not([data-article-id*='undefined']):not([data-category-id='undefined']) h2, body:not([data-article-id*='undefined']):not([data-category-id='undefined']) h3, body:not([data-article-id*='undefined']):not([data-category-id='undefined']) h4 { -// margin-top: unset; -// font-size: 16px; -// font-weight: 500; -// } -.magic-box .magic-box-suggestions .magic-box-suggestion span { - color: #666 !important; - // float: left !important; -} - -/* add icons to the module facet */ -.coveo-dynamic-facet-value - .coveo-checkbox-label - button - + .coveo-checkbox-span-label { - &[title='Chaos Engineering']::before { - content: 
url('/img/icon_ce_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Continuous Error Tracking']::before { - content: url('/img/icon_cet_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Cloud Cost Management']::before { - content: url('/img/icon_ccm_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Continuous Delivery']::before { - content: url('/img/icon_cd_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Continuous Delivery & GitOps']::before { - content: url('/img/icon_cd_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Continuous Integration']::before { - content: url('/img/icon_ci_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Feature Flags']::before { - content: url('/img/icon_ff_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Security Testing Orchestration']::before { - content: url('/img/icon_sto_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Service Reliability Management']::before { - content: url('/img/icon_srm_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Software Engineering Insights']::before { - content: url('/img/icon_sei_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Software Supply Chain Assurance']::before { - content: url('/img/icon_ssca_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Supply Chain Security']::before { - content: url('/img/icon_ssca_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Internal Developer Portal']::before { - content: url('/img/icon_idp_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Infrastructure as Code Management']::before { - content: url('/img/icon_iacm_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Gitness']::before { - content: url('/img/icon_code_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Code Repository']::before { - content: url('/img/icon_code_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Database DevOps']::before { - content: url('/img/icon_dbdevops_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Artifact Registry']::before { - content: url('/img/icon_artifact_registry_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Cloud Development Environments']::before { - content: url('/img/icon_cloud_development_environments_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Armory']::before { - content: url('/img/icon_armory_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Harness Platform']::before { - content: url('/img/icon_harness_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Open Source']::before { - content: url('/img/icon_harness_s.svg'); - 
display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='AI Code Assistant']::before { - content: url('/img/icon_aida_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Feature Management & Experimentation']::before { - content: url('/img/icon_ff_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } - &[title='Harness Intelligence']::before { - content: url('/img/icon_aida_s.svg'); - display: inline-block; - width: 16px; - height: 16px; - margin-right: 4px; - } -} - -/* [HDH-114] Cleaner Difference Between FG and NG Docs */ -.harness-search-module .CoveoFieldValue { - &:has(span[title*='FirstGen']) { - background-color: var(--gray-200); - border-color: var(--gray-200); - } - &:has(span[title*='NextGen']) { - background-color: var(--primary-3); - border-color: var(--primary-3); - } -} - -.CoveoLogo { - display: none !important; -} - -/* TBD: needs a better design/solution for the responsiveness of the navbar */ -@media screen and (max-width: 1499px) { - #searchBoxCoveo { - width: 200px; - } -} - -@media screen and (max-width: 1399px) { - #searchBoxCoveo { - width: 200px; - } -} - -@media screen and (max-width: 1349px) { - #searchBoxCoveo { - width: 150px; - } -} - -@media screen and (max-width: 1299px) { - #searchBoxCoveo { - // width: calc(100vw - 1025px); - width: 120px; - .CoveoSearchButton { - display: none; - } - .magic-box-input > input { - font-size: 14px; - } - } - .navbar__logo { - display: flex; - align-items: center; - & > img { - width: 70%; - height: auto; - } - } -} - -@media screen and (max-width: 1179px) { - .navbar__items { - font-size: 14px; - .button--cta, - .button--primary { - font-size: 14px; - padding-left: 1em; - padding-right: 1em; - } - } -} - -@media screen and (max-width: 1079px) { - #searchBoxCoveo { - width: 80px; - // display: none - .magic-box-input > input { - font-size: 12px; - } - } - .magic-box .magic-box-clear { - // height: 12px !important; - width: 14px !important; - // line-height: 12px !important; - } - .CoveoSearchbox .magic-box .magic-box-clear-svg { - width: 8px !important; - // height: 10px !important; - } - .magic-box .magic-box-input .magic-box-underlay, - .magic-box .magic-box-input > input { - padding: 12px 14px 12px 6px !important; - } - .magic-box .magic-box-suggestions { - min-width: 200px; - } -} - -@media screen and (max-width: 1039px) { - .navbar__logo { - & > img { - width: 57%; - } - } -} - -@media screen and (max-width: 996px) { - .navbar #searchBoxCoveo { - display: none; - } - .navbar .navbar-sidebar #searchBoxCoveo { - width: 96%; - display: block; - } - - #searchResultsCoveo .coveo-search-results { - top: 222px; // 195px; // 50px; - margin-right: 0; - z-index: 1; - min-width: var(--ifm-navbar-sidebar-width); - } - #searchResultsCoveo #coveo-search { - min-width: unset; - } - .navbar__logo { - & > img { - width: 100%; - } - } -} diff --git a/src/components/NavbarItems/Engine.ts b/src/components/NavbarItems/Engine.ts new file mode 100644 index 00000000000..de3e6232f6d --- /dev/null +++ b/src/components/NavbarItems/Engine.ts @@ -0,0 +1,57 @@ +import { buildSearchEngine, getOrganizationEndpoints } from '@coveo/headless'; + +interface ITokenData { + token?: string; + orgId?: string; + expiry?: number; +} + +async function InitializeCoveo() { + let tokenData: ITokenData = {}; + const getCoveoToken = async () => { + const rootUrl = window.location.href.split('/').slice(0, 3).join('/'); + try { + const response = await 
fetch(rootUrl + '/api/coveo_api'); + const data = await response.json(); + + const item = { + token: data.token, + orgId: data.id, + expiry: Date.now() + 12 * 60 * 60 * 1000, // 12hrs from now + }; + localStorage.setItem('coveo_token', JSON.stringify(item)); + return item; + } catch (error) { + console.error('Error fetching Coveo token:', error); + return {}; + } + }; + + const storedToken = localStorage.getItem('coveo_token'); + if (storedToken) { + const data = JSON.parse(storedToken); + if (data.expiry <= Date.now()) { + tokenData = await getCoveoToken(); + } else { + tokenData = data; + } + } else { + tokenData = await getCoveoToken(); + } + + try { + const engine = buildSearchEngine({ + configuration: { + organizationId: tokenData?.orgId || '', + accessToken: tokenData?.token || '', + organizationEndpoints: getOrganizationEndpoints(tokenData?.orgId || ''), + }, + }); + + return engine; + } catch (error) { + return null; + } +} + +export default InitializeCoveo; diff --git a/src/components/NavbarItems/components/Facet.tsx b/src/components/NavbarItems/components/Facet.tsx new file mode 100644 index 00000000000..6b3fce893fc --- /dev/null +++ b/src/components/NavbarItems/components/Facet.tsx @@ -0,0 +1,156 @@ +import { Facet as FacetController } from '@coveo/headless'; +import React, { useEffect, useState } from 'react'; +import { moduleIconAndColor } from '../data'; +import styles from './styles.module.scss'; +interface FacetProps { + controller: FacetController; + title: string; + toggleClicked: () => void; +} +const Facet: React.FC = (props) => { + const { controller, toggleClicked } = props; + const [state, setState] = useState(controller.state); + const [open, setOpen] = useState(true); + useEffect(() => { + const unsubscribe = controller.subscribe(() => { + setState(controller.state); + }); + + return () => { + unsubscribe(); + }; + }, []); + + useEffect(() => { + const url = new URL(window.location.href); + const params = new URLSearchParams(url.search); + const QueryCommonmodule = params.get('f-commonmodule'); + const QueryCommonsource = params.get('f-commonsource'); + const QueryCategoryname = params.get('f-categoryname'); + if (QueryCommonmodule && controller.state.facetId == 'commonmodule') { + const values = QueryCommonmodule.split(','); + values.forEach((value) => { + controller.toggleSelect({ + numberOfResults: 0, + state: 'selected', + value: value, + }); + }); + } + if (QueryCommonsource && controller.state.facetId == 'commonsource') { + const values = QueryCommonsource.split(','); + values.forEach((value) => { + controller.toggleSelect({ + numberOfResults: 0, + state: 'selected', + value: value, + }); + }); + } + if (QueryCategoryname && controller.state.facetId == 'categoryname') { + const values = QueryCategoryname.split(','); + values.forEach((value) => { + controller.toggleSelect({ + numberOfResults: 0, + state: 'selected', + value: value, + }); + }); + } + if ( + !QueryCategoryname && + !QueryCommonsource && + !QueryCommonmodule && + controller.state.facetId == 'commonsource' + ) { + console.log('no url facets'); + + controller.toggleSelect({ + numberOfResults: 0, + state: 'selected', + value: 'Developer Hub', + }); + } + controller.sortBy('alphanumeric'); + controller.showMoreValues(); + }, []); + + if (!state.values.length) { + return <>; + } + + function handleClick() { + setOpen(!open); + } + + const showMore = () => { + controller.showMoreValues(); + }; + + const showLess = () => { + controller.showLessValues(); + }; + + return ( +
+    <div className={styles.facet}>
+      <div className={styles.facetTop} onClick={handleClick}>
+        <h3>{props.title}</h3>
+        {open ? (
+          <i className="fa-solid fa-chevron-up" />
+        ) : (
+          <i className="fa-solid fa-chevron-down" />
+        )}
+      </div>
+      {open && (
+        <ul>
+          {state.values.map((value) => (
+            <li key={value.value}>
+              <input
+                type="checkbox"
+                checked={value.state === 'selected'}
+                onChange={() => {
+                  controller.toggleSelect(value);
+                  toggleClicked();
+                }}
+                disabled={state.isLoading}
+              />
+              <div>
+                {moduleIconAndColor[value.value]?.iconUrl && (
+                  <img
+                    src={moduleIconAndColor[value.value].iconUrl}
+                    alt={value.value}
+                  />
+                )}
+                <p>{value.value}</p>
+                <span>({value.numberOfResults})</span>
+              </div>
+            </li>
+          ))}
+          {state.canShowMoreValues && (
+            <button onClick={showMore}>Show more</button>
+          )}
+          {state.canShowLessValues && (
+            <button onClick={showLess}>Show less</button>
+          )}
+        </ul>
+      )}
+    </div>
+ ); +}; + +export default Facet; diff --git a/src/components/NavbarItems/components/FacetBreadCrumbs.tsx b/src/components/NavbarItems/components/FacetBreadCrumbs.tsx new file mode 100644 index 00000000000..0ea8d1499c4 --- /dev/null +++ b/src/components/NavbarItems/components/FacetBreadCrumbs.tsx @@ -0,0 +1,52 @@ +import { BreadcrumbManagerState } from '@coveo/headless'; +import React, { useEffect, useState } from 'react'; +import styles from './styles.module.scss'; + +export default function FacetBreadcrumbs(props) { + const { controller } = props; + const [state, setState] = useState(controller.state); + + useEffect(() => { + const unsubscribe = controller.subscribe(() => { + setState(controller.state); + }); + return () => unsubscribe(); + }, []); + + const mapBreadCrumbField = { + commonsource: 'Source', + commonmodule: 'Module', + categoryname: 'Content Type', + }; + return ( + <> +
+      <div className={styles.FacetBreadCrumbs}>
+        <div>
+          {state.facetBreadcrumbs.map((breadcrumb, index) => (
+            <div key={index}>
+              <p>
+                {mapBreadCrumbField[breadcrumb.field]}:{' '}
+              </p>
+              <div className={styles.FacetBreadCrumbsValues}>
+                {breadcrumb.values.map((value, i) => (
+                  <p key={i} onClick={() => value.deselect()}>
+                    {value.value.value}
+                  </p>
+                ))}
+              </div>
+            </div>
+          ))}
+        </div>
+        {state.hasBreadcrumbs && (
+          <button
+            className={styles.clearButton}
+            onClick={() => controller.deselectAll()}
+          >
+            Clear All Filters
+          </button>
+        )}
+      </div>
+      {state.hasBreadcrumbs && <hr className={styles.hrLine} />
} + + ); +} diff --git a/src/components/NavbarItems/components/Pager.tsx b/src/components/NavbarItems/components/Pager.tsx new file mode 100644 index 00000000000..b63fae04956 --- /dev/null +++ b/src/components/NavbarItems/components/Pager.tsx @@ -0,0 +1,61 @@ +import { Pager } from '@coveo/headless'; +import React, { useEffect, useState } from 'react'; +import styles from './styles.module.scss'; +interface PagerProps { + controller: Pager; +} + +const Pager: React.FC = (props) => { + const { controller } = props; + const [state, setState] = useState(controller.state); + const [activePage, setActivePage] = useState(1); + + useEffect(() => { + const unsubscribe = controller.subscribe(() => { + setState(controller.state); + }); + + return () => unsubscribe(); + }, []); + + useEffect(() => { + controller.selectPage(1); + }, []); + return ( + + ); +}; + +export default Pager; diff --git a/src/components/NavbarItems/components/QuerySummaryAndSort.tsx b/src/components/NavbarItems/components/QuerySummaryAndSort.tsx new file mode 100644 index 00000000000..fb5065a227f --- /dev/null +++ b/src/components/NavbarItems/components/QuerySummaryAndSort.tsx @@ -0,0 +1,133 @@ +import { + buildDateSortCriterion, + buildRelevanceSortCriterion, + QuerySummaryState, + SortByDate, + SortByRelevancy, + SortOrder, + SortState, +} from '@coveo/headless'; +import React, { useEffect, useState } from 'react'; +import styles from './styles.module.scss'; +import Tooltip from 'rc-tooltip'; + +const QuerySummaryAndSort = (props) => { + const { summaryController, sortController } = props; + const [summaryState, setSummaryState] = useState( + summaryController.state + ); + const [copied, setCopied] = useState(false); + + useEffect(() => { + setTimeout(() => { + setCopied(false); + }, 10000); + }, [copied]); + useEffect(() => { + const unsubscribe = summaryController.subscribe(() => { + setSummaryState(summaryController.state); + }); + + return () => unsubscribe(); + }, []); + + const relevanceSortCriterion: SortByRelevancy = buildRelevanceSortCriterion(); + const dateDescendingSortCriterion: SortByDate = buildDateSortCriterion( + SortOrder.Descending + ); + const dateAscendingSortCriterion: SortByDate = buildDateSortCriterion( + SortOrder.Ascending + ); + + const [sortState, setSortState] = useState(sortController.state); + useEffect(() => { + const unsubscribe = sortController.subscribe(() => { + setSortState(sortController.state); + }); + + return () => unsubscribe(); + }, []); + useEffect(() => { + sortController.sortBy(relevanceSortCriterion); + }, []); + const [isSortDate, setIsSortDate] = useState(false); + const [isDateAscending, setIsDateAscending] = useState(true); + function handleSort(criterion: string) { + switch (criterion) { + case 'relevance': + setIsSortDate(false); + sortController.sortBy(relevanceSortCriterion); + break; + case 'date': + setIsSortDate(true); + if (isDateAscending) { + sortController.sortBy(dateAscendingSortCriterion); + } else { + sortController.sortBy(dateDescendingSortCriterion); + } + setIsDateAscending(!isDateAscending); + break; + default: + console.log('Error'); + } + } + function handleShareClick() { + setCopied(true); + const input = document.createElement('input'); + input.value = props.copyUrl; + document.body.appendChild(input); + input.select(); + document.execCommand('copy'); + document.body.removeChild(input); + } + return ( +
+    <div className={styles.QuerySummaryAndSort}>
+      <div className={styles.summary}>
+        {' '}
+        <p>
+          Results{' '}
+          <b>
+            {summaryState.firstResult} - {summaryState.lastResult}
+          </b>{' '}
+          of <b>{summaryState.total}</b> for{' '}
+          <b>{summaryState.query}</b> in{' '}
+          <b>{summaryState.durationInSeconds}</b> seconds
+        </p>
+        <Tooltip overlay={copied ? 'Copied!' : 'Share'}>
+          <button onClick={handleShareClick}>
+            <i className="fa-solid fa-share-nodes" />
+          </button>
+        </Tooltip>
+      </div>
+      <div className={styles.sort}>
+        <button
+          className={!isSortDate ? styles.selectedButton : ''}
+          onClick={() => handleSort('relevance')}
+        >
+          Relevance
+        </button>
+        <button
+          className={isSortDate ? styles.selectedButton : ''}
+          onClick={() => handleSort('date')}
+        >
+          Date
+          {isSortDate && (
+            <i
+              className={
+                isDateAscending
+                  ? 'fa-solid fa-arrow-up'
+                  : 'fa-solid fa-arrow-down'
+              }
+            />
+          )}
+        </button>
+      </div>
+    </div>
+ ); +}; + +export default QuerySummaryAndSort; diff --git a/src/components/NavbarItems/components/ResultList.tsx b/src/components/NavbarItems/components/ResultList.tsx new file mode 100644 index 00000000000..fffe049e4f8 --- /dev/null +++ b/src/components/NavbarItems/components/ResultList.tsx @@ -0,0 +1,109 @@ +import React, { useEffect, useState } from 'react'; +import styles from './styles.module.scss'; +import { contentTypeData, moduleIconAndColor } from '../data'; +import { ResultList as ResultListController } from '@coveo/headless'; +interface ResultListProps { + controller: ResultListController; +} +const ResultList: React.FC = (props) => { + const { controller } = props; + const [state, setState] = useState(controller.state); + + useEffect( + () => + controller.subscribe(() => { + setState(controller.state); + }), + [] + ); + if (!state.results.length) { + return
No results
; + } + function handleClick(url: string) { + window.location.href = url; + } + return ( +
+    <div className={styles.resultList}>
+      <ul>
+        {state.results.map((result) => (
+          <li key={result.uniqueId} onClick={() => handleClick(result.clickUri)}>
+            <article>
+              {result?.raw?.commonsource ? (
+                <div className={styles.tagTop}>
+                  <div>
+                    <p>{result?.raw?.commonsource as string}</p>
+                  </div>
+                </div>
+              ) : (
+                <>
+              )}
+              <div className={styles.heading}>
+                <h2>{result.title}</h2>
+              </div>
+              <p>{result.excerpt}</p>
+              <div className={styles.tagBottom}>
+                {Array.isArray(result?.raw?.commonmodule) &&
+                result.raw.commonmodule.length > 0
+                  ? (result?.raw?.commonmodule as Array<string>)?.map(
+                      (module) => (
+                        <div key={module}>
+                          {moduleIconAndColor[module]?.iconUrl && (
+                            <img
+                              src={moduleIconAndColor[module].iconUrl}
+                              alt={module}
+                            />
+                          )}
+                          <p>{module}</p>
+                        </div>
+                      )
+                    )
+                  : null}
+                {result?.raw?.categoryname && (
+                  <div>
+                    <p>{result?.raw?.categoryname as string}</p>
+                  </div>
+                )}
+              </div>
+            </article>
+          </li>
+        ))}
+      </ul>
+    </div>
+ ); +}; + +export default ResultList; diff --git a/src/components/NavbarItems/components/ResultsPerPage.tsx b/src/components/NavbarItems/components/ResultsPerPage.tsx new file mode 100644 index 00000000000..0a9e0ce8ac0 --- /dev/null +++ b/src/components/NavbarItems/components/ResultsPerPage.tsx @@ -0,0 +1,46 @@ +import { ResultsPerPage } from '@coveo/headless'; +import React, { useEffect, useState } from 'react'; +import styles from './styles.module.scss'; +interface ResultsPerPageProps { + controller: ResultsPerPage; +} +const ResultsPerPage: React.FC = (props) => { + const { controller } = props; + const options = [10, 25, 50, 100]; + + const [state, setState] = useState(options[0]); + + useEffect(() => { + const unsubscribe = controller.subscribe(() => {}); + + return () => { + unsubscribe(); + }; + }, [controller]); + + const handleChange = (numberOfResults: number) => { + controller.set(numberOfResults); + setState(numberOfResults); + }; + + return ( +
+    <div className={styles.resultPerPage}>
+      <p>Results per page</p>
+      <ul>
+        {options.map((numberOfResults) => (
+          <li key={numberOfResults}>
+            <button
+              className={state === numberOfResults ? styles.active : ''}
+              onClick={() => handleChange(numberOfResults)}
+            >
+              {numberOfResults}
+            </button>
+          </li>
+        ))}
+      </ul>
+    </div>
+ ); +}; + +export default ResultsPerPage; diff --git a/src/components/NavbarItems/components/SearchBox.tsx b/src/components/NavbarItems/components/SearchBox.tsx new file mode 100644 index 00000000000..dc51aea58fa --- /dev/null +++ b/src/components/NavbarItems/components/SearchBox.tsx @@ -0,0 +1,125 @@ +import React, { useState, useEffect } from 'react'; +import { SearchBox as SearchBoxController } from '@coveo/headless'; +import styles from './styles.module.scss'; + +interface SearchBoxProps { + controller: SearchBoxController; + onSearch: (input: string) => void; +} + +const SearchBox: React.FC = (props) => { + const { controller } = props; + const [state, setState] = useState(controller.state); + const [inputValue, setInputValue] = useState(controller.state.value); + const [isUrlParamsLoaded, setIsUrlParamsLoaded] = useState(false); + const [highlightedIndex, setHighlightedIndex] = useState(null); + + useEffect(() => { + controller.subscribe(() => setState(controller.state)); + const url = new URL(window.location.href); + const params = new URLSearchParams(url.search); + const query = params.get('q'); + if (query) { + controller.updateText(query); + controller.submit(); + props.onSearch(query); + setIsUrlParamsLoaded(true); + } + }, []); + + const handleKeyDown = (event: React.KeyboardEvent) => { + const suggestionCount = state.suggestions.length; + + switch (event.key) { + case 'ArrowDown': + event.preventDefault(); + setHighlightedIndex( + highlightedIndex === null + ? 0 + : (highlightedIndex + 1) % suggestionCount + ); + break; + case 'ArrowUp': + event.preventDefault(); + setHighlightedIndex( + highlightedIndex === null + ? suggestionCount - 1 + : (highlightedIndex - 1 + suggestionCount) % suggestionCount + ); + break; + case 'Enter': + if (highlightedIndex !== null && state.suggestions[highlightedIndex]) { + event.preventDefault(); + controller.selectSuggestion( + state.suggestions[highlightedIndex].rawValue + ); + props.onSearch(state.suggestions[highlightedIndex].rawValue); + setHighlightedIndex(null); + } else { + controller.submit(); + props.onSearch(inputValue); + } + break; + case ' ': + if (highlightedIndex !== null && state.suggestions[highlightedIndex]) { + event.preventDefault(); + controller.selectSuggestion( + state.suggestions[highlightedIndex].rawValue + ); + props.onSearch(state.suggestions[highlightedIndex].rawValue); + setHighlightedIndex(null); + } + break; + default: + break; + } + }; + + return ( +
+    <div className={styles.searchBoxMain}>
+      <div className={styles.searchBox}>
+        <input
+          value={inputValue}
+          onChange={(e) => {
+            controller.updateText(e.target.value);
+            setInputValue(e.target.value);
+            setIsUrlParamsLoaded(false);
+            setHighlightedIndex(null);
+          }}
+          onKeyDown={handleKeyDown}
+          type="search"
+        />
+        <i className="fa-solid fa-magnifying-glass" />
+      </div>
+      {!isUrlParamsLoaded && state.suggestions.length > 0 && (
+        <ul>
+          {state.suggestions.map((suggestion, index) => {
+            const value = suggestion.rawValue;
+            const isHighlighted = index === highlightedIndex;
+            return (
+              <li
+                key={value}
+                onClick={() => {
+                  controller.selectSuggestion(value);
+                  props.onSearch(suggestion.rawValue);
+                  setHighlightedIndex(null);
+                }}
+                onKeyDown={handleKeyDown}
+                className={isHighlighted ? styles.highlighted : ''}
+              >
+                <p>{value}</p>
+              </li>
+            );
+          })}
+        </ul>
+      )}
+    </div>
+ ); +}; + +export default SearchBox; diff --git a/src/components/NavbarItems/components/SearhResultBox.tsx b/src/components/NavbarItems/components/SearhResultBox.tsx new file mode 100644 index 00000000000..f4872a27f48 --- /dev/null +++ b/src/components/NavbarItems/components/SearhResultBox.tsx @@ -0,0 +1,236 @@ +import { + BreadcrumbManager as BreadcrumbManagerType, + buildBreadcrumbManager, + buildFacet, + buildPager, + buildQuerySummary, + buildRelevanceSortCriterion, + buildResultList, + buildResultsPerPage, + buildSort, + PagerOptions, + QuerySummary as QuerySummaryType, + SortByRelevancy, + Sort as SortType, +} from '@coveo/headless'; +import React, { forwardRef, useEffect, useState } from 'react'; +import Facet from './Facet'; +import FacetBreadcrumbs from './FacetBreadCrumbs'; +import Pager from './Pager'; +import QuerySummaryAndSort from './QuerySummaryAndSort'; +import ResultList from './ResultList'; +import ResultsPerPage from './ResultsPerPage'; +import styles from './styles.module.scss'; +interface SearchResultBoxProps { + open: boolean; + engine: any; + searchValue: string; +} +interface ShareLinkValue { + facetId: string; + value: string; +} +const SearchResultBox = forwardRef( + ({ open, engine }, ref) => { + const [copyUrl, setCopyUrl] = useState(''); + const [clicked, setClicked] = useState(false); + const [showFacet, setShowFacet] = useState(false); + const [categorynameFacetController, setCategorynameFacetController] = + useState(null); + const [commonmoduleFacetController, setCommonmoduleFacetController] = + useState(null); + const [commonsourceFacetController, setCommonsourceFacetController] = + useState(null); + const [facetBreadCrumbsController, setFacetBreadCrumbsController] = + useState(null); + const [resultListController, setResultListController] = useState(null); + const [summaryController, setSummaryController] = useState(null); + const [sortController, setSortController] = useState(null); + const [pagerController, setPagerController] = useState(null); + const [resultsPerPageController, setResultsPerPageController] = + useState(null); + + useEffect(() => { + async function Initialize() { + const categorynameFacetController = buildFacet(engine, { + options: { field: 'categoryname', numberOfValues: 10 }, + }); + + const commonsourceFacetController = buildFacet(engine, { + options: { field: 'commonsource', numberOfValues: 10 }, + }); + + const commonmoduleFacetController = buildFacet(engine, { + options: { field: 'commonmodule', numberOfValues: 10 }, + }); + + const facetBreadCrumbsController: BreadcrumbManagerType = + buildBreadcrumbManager(engine); + + const resultListController = buildResultList(engine, { + options: { + fieldsToInclude: ['categoryname', 'commonmodule', 'commonsource'], + }, + }); + + const summaryController: QuerySummaryType = buildQuerySummary(engine); + const relevanceSortCriterion: SortByRelevancy = + buildRelevanceSortCriterion(); + const sortController: SortType = buildSort(engine, { + initialState: { + criterion: relevanceSortCriterion, + }, + }); + + const options: PagerOptions = { numberOfPages: 5 }; + const pagerController = buildPager(engine, { options }); + + const resultsPerPageController = buildResultsPerPage(engine, { + initialState: { numberOfResults: 10 }, + }); + setCategorynameFacetController(categorynameFacetController); + setCommonmoduleFacetController(commonmoduleFacetController); + setCommonsourceFacetController(commonsourceFacetController); + setFacetBreadCrumbsController(facetBreadCrumbsController); + 
setResultListController(resultListController); + setSummaryController(summaryController); + setSortController(sortController); + setPagerController(pagerController); + setResultsPerPageController(resultsPerPageController); + } + Initialize(); + }, []); + + useEffect(() => { + function extractSelectedFacets(data) { + const result = []; + + for (const key in data) { + if ( + data[key].request && + Array.isArray(data[key].request.currentValues) + ) { + const facetId = data[key].request.facetId; + + data[key].request.currentValues.forEach((item) => { + if (item.state === 'selected') { + result.push({ + facetId: facetId, + value: item.value, + }); + } + }); + } + } + return result; + } + + function generateQueryString(data: ShareLinkValue[]) { + const grouped = data.reduce((acc, { facetId, value }) => { + if (!acc[facetId]) { + acc[facetId] = []; + } + acc[facetId].push(value); + return acc; + }, {} as Record); + + const queryString = Object.entries(grouped) + .map(([facetId, values]) => { + const encodedValues = values + .map((value) => encodeURIComponent(value)) + .join(','); + return `f-${facetId}=${encodedValues}`; + }) + .join('&'); + + return queryString; + } + + setTimeout(() => { + const shareLinkValues = extractSelectedFacets(engine.state.facetSet); + // console.log(shareLinkValues); + + const queryString = generateQueryString(shareLinkValues); + const rootUrl = window.location.href.split('/').slice(0, 3).join('/'); + const fullUrl = `${rootUrl}?q=${encodeURIComponent( + engine.state.query.q + )}&${queryString}`; + + setCopyUrl(fullUrl); + }, 3000); + + }, [clicked, open]); + + function toggleShowFacet() { + setShowFacet(!showFacet); + } + function toggleClicked() { + setClicked(!clicked); + } + return ( + <> + {open && ( +
+        <div className={styles.SearchResultBox} ref={ref}>
+          <div className={styles.left}>
+            <Facet
+              controller={commonsourceFacetController}
+              title="Source"
+              toggleClicked={toggleClicked}
+            />
+            <Facet
+              controller={commonmoduleFacetController}
+              title="Module"
+              toggleClicked={toggleClicked}
+            />
+            <Facet
+              controller={categorynameFacetController}
+              title="Content Type"
+              toggleClicked={toggleClicked}
+            />
+          </div>
+          <div className={styles.right}>
+            <button className={styles.filterBtn} onClick={toggleShowFacet}>
+              Filter
+            </button>
+            {showFacet && (
+              <div className={styles.responsiveFacet}>
+                <Facet
+                  controller={commonsourceFacetController}
+                  title="Source"
+                  toggleClicked={toggleClicked}
+                />
+                <Facet
+                  controller={commonmoduleFacetController}
+                  title="Module"
+                  toggleClicked={toggleClicked}
+                />
+                <Facet
+                  controller={categorynameFacetController}
+                  title="Content Type"
+                  toggleClicked={toggleClicked}
+                />
+              </div>
+            )}
+            <FacetBreadcrumbs controller={facetBreadCrumbsController} />
+            <QuerySummaryAndSort
+              summaryController={summaryController}
+              sortController={sortController}
+              copyUrl={copyUrl}
+            />
+            <hr className={styles.hrLine} />
+            <ResultList controller={resultListController} />
+            <div className={styles.bottom}>
+              <Pager controller={pagerController} />
+              <ResultsPerPage controller={resultsPerPageController} />
+            </div>
+          </div>
+        </div>
+ )} + + ); + } +); + +export default SearchResultBox; diff --git a/src/components/NavbarItems/components/styles.module.scss b/src/components/NavbarItems/components/styles.module.scss new file mode 100644 index 00000000000..53e465d8d9d --- /dev/null +++ b/src/components/NavbarItems/components/styles.module.scss @@ -0,0 +1,612 @@ +.searchBoxMain { + position: relative; + margin: 0 10px; + max-width: 220px; + + ul { + width: 220px; + position: absolute; + z-index: 200; + border: 1px solid var(--gray-300); + overflow: hidden; + margin: 0; + padding: 0; + background-color: var(--background-color); + li { + cursor: pointer; + width: 100%; + list-style: none; + color: var(--text-color); + text-align: left; + padding: 12px; + font-size: 16px; + line-height: 24px; + height: 40px; + white-space: pre; + overflow: auto; + box-sizing: border-box; + display: block; + overflow: hidden; + display: flex; + justify-content: flex-start; + align-items: center; + &:hover { + background-color: var(--gray-100); + } + &:focus { + outline: none !important; + } + } + p { + margin: 0; + padding: 0; + } + .highlighted { + background-color: var(--gray-100); + &:focus { + outline: none !important; + } + } + } + .searchBox { + display: flex; + justify-content: flex-start; + align-items: center; + gap: 8px; + + i { + font-size: 18px; + cursor: pointer; + display: block; + @media (max-width: 1220px) { + display: none; + } + } + input { + width: 100%; + max-width: 220px; + color: var(--text-color); + text-align: left; + padding: 12px; + font-size: 16px; + line-height: 24px; + height: 40px; + white-space: pre; + overflow: auto; + box-sizing: border-box; + display: block; + border-radius: 4px; + border: 1px solid var(--gray-300); + &:focus { + outline: none !important; + border: 1px solid var(--gray-400); + } + + @media (max-width: 1320px) { + max-width: 100px; + } + @media (max-width: 996px) { + max-width: 720px; + } + } + } +} +.SearchResultBox { + position: absolute; + max-height: 90vh; + overflow-y: auto; + overflow-x: hidden; + max-width: 1041px; + right: 12px; + width: 90%; + display: flex; + padding: 16px; + justify-content: flex-start; + gap: 16px; + background-color: var(--background-color); + // background-color: yellow; + border: 1px solid white; + border-radius: 8px; + margin: 8px auto; + box-shadow: 0 0 1px rgba(40, 41, 61, 0.04), 0 4px 8px rgba(96, 97, 112, 0.16); + z-index: 100; + + @media (max-width: 996px) { + width: 94%; + } + .right { + flex-grow: 1; + width: 100%; + .responsiveFacet { + display: none; + @media (max-width: 840px) { + display: inline; + } + } + .filterBtn { + border-radius: 2px; + + font-size: 12px; + display: inline-block; + padding: 0 7px; + height: 22px; + font-weight: 700; + line-height: 20px; + letter-spacing: 0.09px; + vertical-align: middle; + white-space: normal; + color: var(--link-color-hover); + cursor: pointer; + text-transform: uppercase; + margin-bottom: 12px; + display: none; + border: none; + background-color: #d9dae5; + @media (max-width: 840px) { + display: block; + } + } + } + .left { + width: 240px; + @media (max-width: 840px) { + display: none; + } + } +} + +.resultList { + ul { + padding: 0; + margin: 0; + } + ul > li { + margin-bottom: 16px; + text-decoration: none; + list-style: none; + width: 100%; + max-width: 750px; + padding: 16px; + display: flex; + flex-direction: column; + gap: 8px; + border-bottom: thin solid #bcc3ca; + background: var(--coveo-card-bg); + border-bottom: unset; + border-radius: 8px; + margin: 8px auto; + box-shadow: 0 0 1px rgba(40, 41, 61, 
0.04), + 0 2px 4px rgba(96, 97, 112, 0.16); + article { + display: flex; + gap: 12px; + flex-direction: column; + } + + .heading { + h2 { + margin: 0; + padding: 0; + color: var(--link-color); + text-decoration: none; + cursor: pointer; + word-wrap: break-word; + font-size: 18px; + font-weight: 500; + line-height: 28px; + &:hover { + text-decoration: underline; + color: var(--link-color-hover); + } + } + a { + font-size: 14px; + font-weight: 400; + line-height: 18px; + color: var(--link-color); + text-decoration: none; + cursor: pointer; + word-wrap: break-word; + &:hover { + text-decoration: underline; + color: var(--link-color-hover); + } + } + } + + p { + margin: 0; + padding: 0; + color: var(--gray-600); + font-size: 14px; + font-weight: 400; + line-height: 20px; + } + .tagTop, + .tagBottom { + display: flex; + gap: 8px; + @media (max-width: 640px) { + flex-direction: column; + } + + div { + color: var(--gray-800); + text-align: center; + text-align: center; + letter-spacing: 0.2px; + background-color: #fafbfc; + border: 1px solid #d9dae5; + border-radius: 2px; + align-items: center; + width: fit-content; + padding: 4px 8px; + font-size: 12px; + font-weight: 700; + line-height: 16px; + box-sizing: border-box; + display: flex; + flex-direction: row; + justify-content: flex-start; + align-items: center; + gap: 4px; + + img { + height: 16px; + width: 16px; + } + p { + margin: 0; + padding: 0; + font-size: 12px; + line-height: 16px; + font-weight: 700; + } + } + } + } +} + +.QuerySummaryAndSort { + display: flex; + justify-content: space-between; + align-items: center; + height: 45px; + margin: 0; + padding: 0; + gap: 16px; + @media (max-width: 640px) { + flex-direction: column; + height: auto; + align-items: flex-start; + } + .summary { + gap: 24px; + display: flex; + align-items: center; + justify-content: space-between; + p { + display: inline; + color: var(--gray-600); + font-size: 14px; + margin: 0; + padding: 0; + @media (max-width: 640px) { + display: block; + } + } + button { + margin: 0; + padding: 0; + cursor: pointer; + background-color: transparent; + border: none; + i { + font-size: 14px; + color: var(--link-color); + } + } + } + .sort { + display: flex; + justify-content: flex-end; + align-items: center; + flex-grow: 1; + button { + box-sizing: border-box; + font-size: 12px; + font-weight: 700; + color: var(--gray-600); + border: none; + height: 45px; + line-height: 24px; + text-transform: uppercase; + padding: 8px; + background-color: var(--background-color); + margin: 0; + background-color: none; + cursor: pointer; + display: flex; + justify-content: center; + align-items: center; + i { + margin-left: 4px; + } + } + .selectedButton { + border-bottom: 2px solid var(--text-color); + } + } +} +.hrLine { + margin: 0; + padding: 0; + border-bottom: 2px solid #d5dade; +} + +.resultPerPage { + margin: 0; + padding: 0; + display: flex; + gap: 16px; + align-items: center; + + p { + margin: 0; + padding: 0; + font-size: 12px; + color: var(--text-color); + } + display: flex; + justify-content: flex-end; + align-items: center; + ul { + margin: 0; + padding: 0; + display: flex; + justify-content: flex-start; + align-items: center; + gap: 8px; + li { + text-decoration: none; + list-style: none; + display: flex; + justify-content: flex-start; + align-items: center; + button { + padding: 8px; + font-weight: 600; + color: var(--primary-7); + background-color: white; + border: none; + border-radius: 4px; + cursor: pointer; + } + .active { + background-color: var(--primary-7); + color: white; 
+      }
+    }
+  }
+}
+
+.pager {
+  margin: 0;
+  padding: 0;
+  display: flex;
+  justify-content: flex-start;
+  align-items: center;
+  gap: 8px;
+  button {
+    padding: 8px;
+    font-weight: 600;
+    color: var(--primary-7);
+    background-color: white;
+    border: 1px solid var(--primary-7);
+    border-radius: 4px;
+    cursor: pointer;
+  }
+  .active {
+    background-color: var(--primary-7);
+    color: white;
+  }
+}
+
+.bottom {
+  display: flex;
+  justify-content: space-between;
+  align-items: center;
+  padding: 12px 0;
+
+  @media (max-width: 640px) {
+    flex-direction: column;
+    align-items: flex-start;
+    gap: 16px;
+  }
+}
+
+.facet {
+  margin-bottom: 12px;
+  .facetTop {
+    display: flex;
+    justify-content: space-between;
+    align-items: center;
+    cursor: pointer;
+    h3 {
+      font-size: 15px;
+      color: #0059b3;
+      font-weight: 700;
+      white-space: nowrap;
+      text-overflow: ellipsis;
+      overflow: hidden;
+      text-transform: capitalize;
+      line-height: 35px;
+      margin: 0;
+      padding: 0;
+    }
+    i {
+      cursor: pointer;
+      color: #0059b3;
+      font-size: 14px;
+      font-weight: bold;
+    }
+  }
+  ul {
+    margin: 0;
+    padding: 10px 0;
+    button {
+      display: flex;
+      justify-content: flex-start;
+      align-items: center;
+      text-decoration: none;
+      cursor: pointer;
+      overflow: visible;
+      width: 100%;
+      margin: 0;
+      margin-top: 2px;
+      padding: 3px 0;
+      background: none;
+      border: none;
+      color: #296896;
+      text-align: left;
+      font-size: 15px;
+      gap: 4px;
+      i {
+        font-size: 10px;
+      }
+    }
+    li {
+      text-decoration: none;
+      list-style: none;
+      white-space: nowrap;
+      text-overflow: ellipsis;
+      overflow: hidden;
+      line-height: 1.2em;
+      text-align: center;
+      color: #22222a;
+      font-size: 12px;
+      font-weight: 300;
+      box-sizing: border-box;
+      display: flex;
+      justify-content: flex-start;
+      align-items: flex-start;
+      gap: 12px;
+      margin-bottom: 8px;
+      input {
+        position: relative;
+        min-width: 17px;
+        min-height: 17px;
+        padding: 0;
+        margin: 0;
+        background: #fff;
+        border: 1px solid #7e8c9a;
+        border-radius: 2px;
+        outline: none;
+        transition: all 0.2s;
+        cursor: pointer;
+        box-sizing: border-box;
+      }
+      div {
+        display: flex;
+        justify-content: flex-start;
+        align-items: flex-start;
+        gap: 4px;
+        max-height: 16px;
+        img {
+          width: 16px;
+          height: 16px;
+          display: inline-block;
+        }
+        p {
+          color: var(--text-color);
+          margin: 0;
+          padding: 0;
+          display: flex;
+          justify-content: flex-start;
+          align-items: center;
+          flex-grow: 1;
+          overflow: hidden;
+          gap: 8px;
+          line-height: 12px;
+        }
+        span {
+          color: var(--gray-900);
+        }
+      }
+    }
+  }
+}
+
+.FacetBreadCrumbs {
+  display: flex;
+  justify-content: space-between;
+  align-items: flex-start;
+  margin-bottom: 12px;
+
+  @media (max-width: 640px) {
+    flex-direction: column-reverse;
+    align-items: flex-start;
+    gap: 16px;
+  }
+  div > div {
+    display: flex;
+    justify-content: flex-start;
+    align-items: flex-start;
+    p {
+      margin: 0;
+      padding: 0;
+      color: var(--link-color);
+      font-size: 14px;
+      font-weight: 400;
+      display: inline;
+      margin-right: 8px;
+    }
+  }
+  .clearButton {
+    margin: 0;
+    padding: 0;
+    font-size: 14px;
+    color: var(--link-color-hover);
+    cursor: pointer;
+    background-color: transparent;
+    border: none;
+    &:hover {
+      text-decoration: underline;
+    }
+  }
+  .FacetBreadCrumbsValues {
+    display: flex;
+    justify-content: flex-start;
+    align-items: center;
+    flex-wrap: wrap;
+
+    p {
+      display: inline-flex;
+      justify-content: flex-start;
+      align-items: center;
+      gap: 4px;
+      text-decoration: none;
+ cursor: pointer; + overflow: visible; + width: auto; + padding: 0; + margin: 0; + background: none; + border: none; + color: var(--link-color-hover); + font-size: 14px; + margin-right: 15px; + outline-offset: 1px; + + &:hover { + text-decoration: underline; + } + } + } +} diff --git a/src/components/NavbarItems/data.ts b/src/components/NavbarItems/data.ts new file mode 100644 index 00000000000..1fc2989a8a7 --- /dev/null +++ b/src/components/NavbarItems/data.ts @@ -0,0 +1,211 @@ +export const moduleIconAndColor = { + 'Infrastructure as Code Management': { + iconUrl: 'https://developer.harness.io/img/icon_iacm_s.svg', + colors: { + border: '--mod-iacm-200', + backgroundColor: '--mod-iacm-100', + color: '--mod-iacm-300', + }, + }, + 'Software Supply Chain Assurance': { + iconUrl: 'https://developer.harness.io/img/icon_ssca_s.svg', + colors: { + border: '--mod-ssca-200', + backgroundColor: '--mod-ssca-100', + color: '--mod-ssca-300', + }, + }, + 'Supply Chain Security': { + iconUrl: 'https://developer.harness.io/img/icon_ssca_s.svg', + colors: { + border: '--mod-ssca-200', + backgroundColor: '--mod-ssca-100', + color: '--mod-ssca-300', + }, + }, + 'Continuous Error Tracking': { + iconUrl: 'https://developer.harness.io/img/icon_cet_s.svg', + colors: { + border: '--mod-cet-200', + backgroundColor: '--mod-cet-100', + color: '--mod-cet-300', + }, + }, + 'Open Source': { + iconUrl: 'https://developer.harness.io/img/icon_harness_s.svg', + colors: { + border: '--mod-opensource-200', + backgroundColor: '--mod-opensource-100', + color: '--mod-opensource-300', + }, + }, + 'Security Testing Orchestration': { + iconUrl: 'https://developer.harness.io/img/icon_sto_s.svg', + colors: { + border: '--mod-sto-200', + backgroundColor: '--mod-sto-100', + color: '--mod-sto-300', + }, + }, + + 'Internal Developer Portal': { + iconUrl: 'https://developer.harness.io/img/icon_idp_s.svg', + colors: { + border: '--mod-idp-200', + backgroundColor: '--mod-idp-100', + color: '--mod-idp-300', + }, + }, + 'Code Repository': { + iconUrl: 'https://developer.harness.io/img/icon_code_s.svg', + colors: { + border: '--mod-code-200', + backgroundColor: '--mod-code-100', + color: '--mod-code-300', + }, + }, + 'Continuous Integration': { + iconUrl: 'https://developer.harness.io/img/icon_ci_s.svg', + colors: { + border: '--mod-ci-200', + backgroundColor: '--mod-ci-100', + color: '--mod-ci-300', + }, + }, + 'Database DevOps': { + iconUrl: 'https://developer.harness.io/img/icon_dbdevops_s.svg', + colors: { + border: '--mod-dbdevops-200', + backgroundColor: '--mod-dbdevops-100', + color: '--mod-dbdevops-300', + }, + }, + 'Continuous Delivery': { + iconUrl: 'https://developer.harness.io/img/icon_cd_s.svg', + colors: { + border: '--mod-cd-200', + backgroundColor: '--mod-cd-100', + color: '--mod-cd-300', + }, + }, + 'Continuous Delivery & GitOps': { + iconUrl: 'https://developer.harness.io/img/icon_cd_s.svg', + colors: { + border: '--mod-cd-200', + backgroundColor: '--mod-cd-100', + color: '--mod-cd-300', + }, + }, + 'Harness Platform': { + iconUrl: 'https://developer.harness.io/img/icon_harness_s.svg', + colors: { + border: '', + backgroundColor: '', + color: '', + }, + }, + Armory: { + iconUrl: 'https://developer.harness.io/img/icon_armory_s.svg', + colors: { + border: '--mod-ci-200', + backgroundColor: '--mod-ci-100', + color: '--mod-ci-300', + }, + }, + 'Feature Flags': { + iconUrl: 'https://developer.harness.io/img/icon_ff_s.svg', + colors: { + border: '--mod-ff-200', + backgroundColor: '--mod-ff-100', + color: '--mod-ff-300', + }, + 
}, + 'Cloud Development Environments': { + iconUrl: + 'https://developer.harness.io/img/icon_cloud_development_environments_s.svg', + colors: { + border: '--mod-cde-200', + backgroundColor: '--mod-cde-100', + color: '--mod-cde-300', + }, + }, + 'Service Reliability Management': { + iconUrl: 'https://developer.harness.io/img/icon_srm_s.svg', + colors: { + border: '--mod-srm-200', + backgroundColor: '--mod-srm-100', + color: '--mod-srm-300', + }, + }, + 'Cloud Cost Management': { + iconUrl: 'https://developer.harness.io/img/icon_ccm_s.svg', + colors: { + border: '--mod-ccm-200', + backgroundColor: '--mod-ccm-100', + color: '--mod-ccm-300', + }, + }, + 'Software Engineering Insights': { + iconUrl: 'https://developer.harness.io/img/icon_sei_s.svg', + colors: { + border: '--mod-sei-200', + backgroundColor: '--mod-sei-100', + color: '--mod-sei-300', + }, + }, + 'Chaos Engineering': { + iconUrl: 'https://developer.harness.io/img/icon_ce_s.svg', + colors: { + border: '--mod-ce-200', + backgroundColor: '--mod-ce-100', + color: '--mod-ce-300', + }, + }, + 'AI Code Assistant': { + iconUrl: + 'https://cdn.prod.website-files.com/6222ca42ea87e1bd1aa1d10c/666730eaefeed82bad545d10_Subtract.svg', + colors: { + border: '--mod-iacm-200', + backgroundColor: '--mod-iacm-100', + color: '--mod-iacm-300', + }, + }, + 'Harness Intelligence': { + iconUrl: + 'https://cdn.prod.website-files.com/6222ca42ea87e1bd1aa1d10c/666730eaefeed82bad545d10_Subtract.svg', + colors: { + border: '--mod-iacm-200', + backgroundColor: '--mod-iacm-100', + color: '--mod-iacm-300', + }, + }, + 'Feature Management & Experimentation': { + iconUrl: + 'https://cdn.prod.website-files.com/6222ca42ea87e1bd1aa1d10c/66e605fcc40e1bbd3f7b2604_FME%20icon%20color.svg', + colors: { + border: '--mod-ff-200', + backgroundColor: '--mod-ff-100', + color: '--mod-ff-300', + }, + }, + 'Artifact Registry': { + iconUrl: + 'https://cdn.prod.website-files.com/6222ca42ea87e1bd1aa1d10c/66df6931514873567c9cdc3c_artifact-registry-logo-icon.svg', + colors: { color: '' }, + }, +}; + +export const contentTypeData = { + 'Documentation (FirstGen)': { + border: '--gray-200', + backgroundColor: '--gray-200', + }, + 'Documentation (NextGen)': { + border: '--primary-3', + backgroundColor: '--primary-3', + }, + 'Knowledge Base': { + border: '', + backgroundColor: '', + }, +}; diff --git a/src/components/NavbarItems/index.tsx b/src/components/NavbarItems/index.tsx new file mode 100644 index 00000000000..0944f133ae3 --- /dev/null +++ b/src/components/NavbarItems/index.tsx @@ -0,0 +1,65 @@ +import { buildSearchBox, SearchBoxState } from '@coveo/headless'; +import React, { useEffect, useRef, useState } from 'react'; +import SearchBox from './components/SearchBox'; +import InitializeCoveo from './Engine'; +import SearchResultBox from './components/SearhResultBox'; +const CoveoSearch = () => { + const searchBoxRef = useRef(null); + const [open, setOpen] = useState(false); + const [searchValue, setSearchValue] = useState(''); + const [searchBoxController, setSearchBoxController] = useState(null); + const handleClickOutside = (event: MouseEvent) => { + if ( + searchBoxRef.current && + !searchBoxRef.current.contains(event.target as Node) + ) { + setOpen(false); + } + }; + + useEffect(() => { + document.addEventListener('mousedown', handleClickOutside); + return () => { + document.removeEventListener('mousedown', handleClickOutside); + }; + }, []); + + const handleSearch = (searchValue: string) => { + if (searchValue.trim().length > 0) { + setOpen(true); + setSearchValue(searchValue); + 
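+      // opening is one-way here; the results dropdown is closed again by the
+      // document-level mousedown listener registered above (handleClickOutside)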
}
+  };
+  const [engine, setEngine] = useState(null);
+  useEffect(() => {
+    async function Initialize() {
+      const engine = await InitializeCoveo();
+      setEngine(engine);
+      if (engine) {
+        const SearchBoxController = buildSearchBox(engine);
+        setSearchBoxController(SearchBoxController);
+      }
+    }
+    Initialize();
+  }, []);
+
+  if (!engine) {
+    return <></>;
+  }
+  if (!searchBoxController) {
+    return <></>;
+  }
+  // prop wiring below is representative: SearchBox drives the query through the
+  // headless controller, and SearchResultBox renders results while the dropdown is open
+  return (
+    <div ref={searchBoxRef}>
+      <SearchBox controller={searchBoxController} handleSearch={handleSearch} />
+      {open && <SearchResultBox engine={engine} searchValue={searchValue} />}
+    </div>
+  );
+};
+
+export default CoveoSearch;
diff --git a/src/components/Roadmap/data/fmeData.ts b/src/components/Roadmap/data/fmeData.ts
index bf3ca58f156..e5608f3a025 100644
--- a/src/components/Roadmap/data/fmeData.ts
+++ b/src/components/Roadmap/data/fmeData.ts
@@ -4,36 +4,11 @@ export const FmeData: Horizon = {
   Now: {
     description: "Q4 2024, Nov 2024-Jan 2025",
     feature: [
-      {
-        tag: [{ value: "Targeting" }],
-        title: "Large segments",
-        description: "New segment type enabling large-scale audience targeting up to 1M keys.",
-      },
-      {
-        tag: [{ value: "Better Together" }],
-        title: "Access Split from within Harness app",
-        description: "Allow Harness customers to authenticate and access Split from the Harness application.",
-      },
       {
         tag: [{ value: "Targeting" }],
         title: "Flag impressions toggle",
         description: "Disable the flow of impressions for individual flags.",
       },
-      {
-        tag: [{ value: "Monitoring" }],
-        title: "Feature flag alerts on Monitoring tab",
-        description: "Show significance alerts on the monitoring tab, in addition to the threshold-based alerts shown now.",
-      },
-      {
-        tag: [{ value: "Measurement" }],
-        title: "Rum agents sampling",
-        description: "Control the number of events that are auto-captured by the Split Suite and RUM agents.",
-      },
-      {
-        tag: [{ value: "Targeting" }],
-        title: "SemVer attribute dictionary support",
-        description: "Support SemVer type attributes and suggested values in the attribute dictionary to streamline entry & reduce risk of errors.",
-      },
     ],
   },
   Next: {
@@ -78,6 +53,11 @@ export const FmeData: Horizon = {
         title: "Warehouse-native experimentation",
         description: "Experiment directly on impressions & events in your data warehouse.",
       },
+      {
+        tag: [{ value: "SDK" }],
+        title: "Remote evaluation client-side SDKs",
+        description: "No rules are exposed to the client side. Instead, these thin clients use a secure cloud service for flag evaluations.",
+      },
       {
         tag: [{ value: "SDK" }],
         title: "Extend SDK support",
@@ -88,6 +68,28 @@ export const FmeData: Horizon = {
   Released: {
     description: "What has been released",
     feature: [
+      {
+        tag: [{ value: "Targeting" }],
+        title: "Large segments",
+        description: "New segment type enabling large-scale audience targeting up to 1M keys.
Even higher limits available by request.",
+      },
+      {
+        tag: [{ value: "Targeting" }],
+        title: "SemVer attribute dictionary support",
+        description: "Support SemVer type attributes and suggested values in the attribute dictionary to streamline entry & reduce risk of errors.",
+        link: "https://www.split.io/releases/2024-12-06/",
+      },
+      {
+        tag: [{ value: "Better Together" }],
+        title: "Access Split from within Harness app",
+        description: "Allow Harness customers to authenticate and access Split from the Harness application.",
+      },
+      {
+        tag: [{ value: "Monitoring" }],
+        title: "Feature flag alerts on Monitoring tab",
+        description: "Show significance alerts on the monitoring tab, in addition to the threshold-based alerts shown now.",
+        link: "https://www.split.io/releases/#zzodil-2024-11-27",
+      },
       {
         tag: [{ value: "Measurement" }],
         title: "Monitoring tab: feature traffic insights",
diff --git a/src/components/Roadmap/data/sscaData.ts b/src/components/Roadmap/data/sscaData.ts
index dce91056bab..280d05e5d60 100644
--- a/src/components/Roadmap/data/sscaData.ts
+++ b/src/components/Roadmap/data/sscaData.ts
@@ -7,7 +7,7 @@ export const SscaData: Horizon = {
       {
         tag: [],
         title: "Repo Security Posture Management for GitHub",
-        description: "Identify misconfigs in source code repositories based on industry standards such as CIS and OWASP Top 10 CI/CD Security Risks. Also, includes support for SBOM generation and security tests such as SAST, SCA, and secrets scanning.",
+        description: "Identify misconfigurations in source code repositories based on industry standards such as CIS and OWASP Top 10 CI/CD Security Risks. Also, includes support for SBOM generation and security tests such as SAST, SCA, and secrets scanning.",
       },
       {
         tag: [],
@@ -16,73 +16,109 @@
       },
       {
         tag: [],
-        title: "Unified Security View",
-        description: "A unified view on OSS dependencies, vulnerabilities, and deployment details for artifacts all in one place.",
+        title: "CI/CD Security Posture Management for GitHub Workflows & Harness Pipelines",
+        description: "Perform static analysis in GitHub workflows and Harness pipelines to detect risky actions and misconfigurations.",
       },
       {
         tag: [],
-        title: "ECR & ACR support",
-        description: "Support to connect with ECR and ACR for SBOM generation and management.",
+        title: "Compliance Report Generation",
+        description: "Generate and download reports based on compliance standards such as CIS v1.0 and OWASP Top 10 CI/CD Security Risks.",
       },
       {
         tag: [],
-        title: "Base Image Detection",
-        description: "The feature helps in prioritizing vulnerability assessment between base images and applications, while also enforcing policies such as identifying newer versions of base images.",
+        title: "HashiCorp Vault Support",
+        description: "Leverage keys from HashiCorp Vault to attest and verify the build provenance.",
       },
     ],
   },
   "Now": {
-    description: "Q3 2024, Aug-Oct 2024",
+    description: "Q4 2024, Nov 2024 - Jan 2025",
    feature: [
      {
        tag: [],
-        title: "Report Generation",
-        description: "Generate and download reports based on compliance standards such as CIS, and OWASP Top 10 CI/CD Security Risks",
+        title: "Repo Security Posture Management for Harness Code",
+        description: "Identify misconfigurations in source code repositories based on industry standards such as CIS and OWASP Top 10 CI/CD Security Risks.
Also, includes support for SBOM generation and security tests such as SAST, SCA, and secrets scanning.",
       },
+      {
         tag: [],
-        title: "CI/CD Security Posture Management for GitHub Workflows & Harness Pipelines:",
-        description: "Perform static analysis in Github workflows and Harness pipelines to detect risky actions and misconfigurations. ",
+        title: "SBOM & SLSA support with GitHub Actions",
+        description: "Generate SBOM and achieve SLSA compliance using GitHub Actions for artifacts built in GitHub.",
       },
       {
         tag: [],
-        title: "Repo Security Posture Management for Harness Code",
-        description: "Identify misconfigs in source code repositories based on industry standards such as CIS and OWASP Top 10 CI/CD Risk. Also, includes support for SBOM generation and security tests such as SAST, SCA, and secrets scanning.",
+        title: "Artifact Signing and Verification",
+        description: "Ensure built artifacts are not tampered with before deployment.",
       },
       {
         tag: [],
-        title: "SBOM & SLSA support with GitHub Actions and Jenkins Plugins",
-        description: "Generate SBOM and achieve SLSA compliance using GitHub Actions and Jenkins Plugins.",
+        title: "SBOM API Support",
+        description: "Enable SBOM download APIs for repos and artifacts.",
       },
       {
         tag: [],
-        title: "HashiCorp Vault Support",
-        description: "Leverage keys from HashiCorp Vault to attest and verify your artifacts.",
+        title: "SLSA Policies",
+        description: "Out of the box policies to ensure compliance with Level 1, Level 2, and Level 3 requirements.",
+      },
+      {
+        tag: [],
+        title: "Licensing Policies",
+        description: "Out of the box open source policies to check for non-compliant licenses in dependencies.",
+      },
+      {
+        tag: [],
+        title: "Report Generation",
+        description: "Generate and download reports based on compliance standards such as CIS and OWASP Top 10 CI/CD Security Risks.",
+      },
+      {
+        tag: [],
+        title: "Bulk Onboarding",
+        description: "Allow users to bulk onboard GitHub repos across orgs and accounts via API.",
       },
     ],
   },
   "Next": {
-    description: "Q4 2024, Nov 2024-Jan 2025",
+    description: "Q1 2025, Feb - April 2025",
     feature: [
       {
         tag: [],
-        title: "OpenSSF, OWASP Top 10 OSS, EO14028 & NIST SP800-204D Support",
-        description: "Out of the box rules for supporting OpenSSF, OWASP Top 10 OSS, EO14028, & NIST SP800-204D compliance standards.",
+        title: "Artifact Chain of Custody V2",
+        description: "Enhanced audit trail that seamlessly integrates all pipeline events at an account level, spanning from source code to deployment.",
+      },
+      {
+        tag: [],
+        title: "OSS Top 10 Risks",
+        description: "Visibility into open source risks across built artifacts using SBOMs.",
+      },
+      {
+        tag: [],
+        title: "OSS Top 10 Policies",
+        description: "Out of the box policies to identify risks in open source dependencies based on OSS Top 10 Risks.",
+      },
+      {
+        tag: [],
+        title: "UX Enhancements",
+        description: "Improving search and filtering across product pages, and the overall user experience.",
       },
       {
         tag: [],
-        title: "Remediation Tracker Support for Compliance Standards Issues",
-        description: "Assign issues to developers and track them with a remediation tracker to ensure governance on compliance standards.",
+        title: "OpenSSF Integration",
+        description: "Support for OpenSSF Rules.",
       },
       {
         tag: [],
-        title: "Governance Policies for Compliance Standards",
-        description: "View and enforce policies on Code Repo, Artifacts and CI/CD pipelines based on rules defined in supply chain standards.",
+        title: "CI/CD Security for Jenkins",
+        description: "Perform static analysis to detect risks and
misconfigurations in Jenkins pipelines.",
+      },
+      {
+        tag: [],
+        title: "mTLS support for SCS plugins",
+        description: "mTLS support for SCS plugins to ensure secure communication with Harness services.",
+      },
     ],
   },
   "Later": {
-    description: "Q1 2025+, Feb 2025 & beyond",
+    description: "Q2 2025+, May 2025 & beyond",
     feature: [
       {
         tag: [],
@@ -91,18 +127,23 @@
       },
       {
         tag: [],
-        title: "CI/CD Security for Jenkins",
-        description: "Perform static analysis in Jenkins pipelines to detect risky plugins and misconfigurations.",
+        title: "SBOM & SLSA support for Jenkins",
+        description: "Generate SBOMs and achieve SLSA compliance using Jenkins pipelines.",
+      },
+      {
+        tag: [],
+        title: "NIST SP800-204D Support",
+        description: "Out of the box rules for supporting NIST SP800-204D compliance standards.",
       },
       {
         tag: [],
-        title: "Harness Runtime Analyzer",
-        description: "Identify anomalous behavior and threats in Harness Pipelines and GitHub Workflows",
+        title: "Remediation Tracker",
+        description: "Assign vulnerabilities & compliance issues to developers using the remediation tracker to track across different types of targets (Artifact, CI/CD, Repos).",
       },
       {
         tag: [],
         title: "SBOM Scoring in Drift Detection",
-        description: "View risk scores on dependencies that get added or removed between artifact drifts which contain vulnerabilities, have invalid licenses or are unmaintained.",
+        description: "View risk scores on dependencies that get added or removed between artifact drifts which contain vulnerabilities, have invalid licenses, or are unmaintained.",
       },
     ],
   },
diff --git a/src/components/University/data/iacm-certification-developer-review-guide.md b/src/components/University/data/iacm-certification-developer-review-guide.md
index eeaf2693c71..077c716554c 100644
--- a/src/components/University/data/iacm-certification-developer-review-guide.md
+++ b/src/components/University/data/iacm-certification-developer-review-guide.md
@@ -17,9 +17,9 @@
 | Configuring stages for infrastructure provisioning | [State Migration](https://developer.harness.io/docs/infra-as-code-management/remote-backends/state-migration/) |
 | Using the step library to manage infrastructure lifecycles | [IaCM Plugins](https://developer.harness.io/docs/category/plugins) |
 | **5. Drift Management** | |
-| Identifying configuration drift and understanding its causes | [Drift Detection](https://developer.harness.io/docs/infra-as-code-management/pipelines/operations/drift-detection) |
+| Identifying configuration drift and understanding its causes | [Drift Detection](https://developer.harness.io/docs/infra-as-code-management/use-iacm/drift-detection) |
 | Setting up drift detection pipelines | [Drift Plugin](https://developer.harness.io/docs/infra-as-code-management/pipelines/iacm-plugins/terraform-plugins/#detect-drift) |
-| Analyzing and remediating drift issues | [Drift Detection](https://developer.harness.io/docs/infra-as-code-management/pipelines/operations/drift-detection) |
+| Analyzing and remediating drift issues | [Drift Detection](https://developer.harness.io/docs/infra-as-code-management/use-iacm/drift-detection) |
 | **6.
Governance and Security** | | | Implementing Role-Based Access Control (RBAC) | [Role-based access control (RBAC)](https://developer.harness.io/docs/infra-as-code-management/project-setup/workspace-rbac) | | Configuring Audit Trails and Monitoring for compliance | [Built-in policies](https://developer.harness.io/docs/infra-as-code-management/policies/terraform-plan-cost-policy) | diff --git a/src/css/custom.css b/src/css/custom.css index 87f265dbf65..44859ef4e72 100644 --- a/src/css/custom.css +++ b/src/css/custom.css @@ -1,7 +1,7 @@ -@import url("https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&display=swap"); -@import "fontawesome/css/fontawesome.min.css"; -@import "fontawesome/css/solid.min.css"; -@import "fontawesome/css/regular.min.css"; +@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&display=swap'); +@import 'fontawesome/css/fontawesome.min.css'; +@import 'fontawesome/css/solid.min.css'; +@import 'fontawesome/css/regular.min.css'; /** * Any CSS included here will be global. The classic template * bundles Infima by default. Infima is a CSS framework designed to @@ -300,7 +300,6 @@ --mod-opensource-200: #d9dae5; --mod-opensource-100: #f3f3fa; - /* ----- */ --ifm-link-decoration: none; @@ -321,8 +320,8 @@ --ifm-navbar-height: 4.25rem; --harness-font-family: Inter, sans-serif, -apple-system, BlinkMacSystemFont, - "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, - "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; + 'Segoe UI', Roboto, 'Helvetica Neue', Arial, 'Noto Sans', sans-serif, + 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji'; /* here */ --ifm-color-content-light: var(--gray-800); @@ -340,11 +339,13 @@ --hover: var(--gray-200); --platform-card-bg: var(--white); --homepageFeatureCard: white; - + --coveo-card-bg: rgba(255, 255, 255); + --link-color: #0278d5; + --link-color-hover: #0059b3; } /* darkmode css */ -html[data-theme="dark"] .tabs__item--active { +html[data-theme='dark'] .tabs__item--active { color: black; background-color: var(--primary-1); border-bottom-color: var(--primary); @@ -363,19 +364,19 @@ summary { color: var(--ifm-heading-color); } /* logo-hdh-dark-theme.svg */ -html[data-theme="dark"] .navbar .navbar__logo { - content: url("/img/logo-hdh-dark-theme.svg"); +html[data-theme='dark'] .navbar .navbar__logo { + content: url('/img/logo-hdh-dark-theme.svg'); } /* logo_dlp.svg */ -html[data-theme="light"] .navbar .navbar__logo { - content: url("/img/logo_dlp.svg"); +html[data-theme='light'] .navbar .navbar__logo { + content: url('/img/logo_dlp.svg'); } -html[data-theme="dark"] .CoveoSearchbox .magic-box { +html[data-theme='dark'] .CoveoSearchbox .magic-box { border: none !important; } -html[data-theme="dark"] .navbar { +html[data-theme='dark'] .navbar { box-shadow: 0 2px 4px 0 rgba(255, 255, 255, 0.2); } /* html[data-theme="dark"] code { @@ -383,7 +384,7 @@ html[data-theme="dark"] .navbar { color: #000000; } */ /* For readability concerns, you should choose a lighter palette in dark mode. 
*/ -[data-theme="dark"] { +[data-theme='dark'] { --platform-card-bg: var(--gray-200); --ifm-color-primary: var(--primary-5); --ifm-color-primary-dark: var(--primary-6); @@ -397,8 +398,14 @@ html[data-theme="dark"] .navbar { --ifm-link-color: var(--ifm-color-primary); --black: #070707; - /* --white: #070707; */ - --gray-900:var(--gray-200) + + --gray-900: var(--gray-200); + --gray-300: var(--gray-800); + --gray-100: var(--gray-1000); + --gray-400: var(--gray-700); + --gray-600: var(--gray-500); + --gray-500: var(--gray-600); + --ifm-navbar-shadow: white !important; --ifm-navbar-background-color: var(--black) !important; --ifm-background-color: var(--black) !important; @@ -410,11 +417,13 @@ html[data-theme="dark"] .navbar { --ifm-color-content-light: var(--gray-200) !important; --card-border: 2px solid var(--gray-200); --img-border: 1px solid var(--gray-200); - --gray-500: #d9dae5; - + --coveo-card-bg: rgba(255, 255, 255, 0.07); --hover: var(--gray-800); --homepageFeatureCard: var(--black); + --link-color: #0059b3; + --link-color-hover: #296896; } + /* darkmode css end*/ html { font-size: 16px; @@ -437,7 +446,7 @@ html *:after { body { font-family: var(--harness-font-family) !important; - background-color: var( --background-color); + background-color: var(--background-color); } a:hover { @@ -499,7 +508,7 @@ h2 { display: none; } -.navbar__item img[alt="BETA"] { +.navbar__item img[alt='BETA'] { margin-left: -0.25rem; margin-top: 10px; width: 24px; @@ -514,7 +523,7 @@ h2 { margin-right: 0.2em; } -.theme-doc-markdown img[class^="img_"] { +.theme-doc-markdown img[class^='img_'] { border: 1px solid #d9dae5; /* var(--gray-400); */ border-radius: 8px; @@ -628,7 +637,7 @@ details summary::before { color: var(--mod-opensource-200); } -.table-of-contents ul li span[class*="color-"] { +.table-of-contents ul li span[class*='color-'] { color: unset; } @@ -665,7 +674,7 @@ a[aria-label="Search by Algolia"] { display: none !important; } -button[title="Copy"] { +button[title='Copy'] { opacity: 0.6 !important; } @@ -697,7 +706,7 @@ button[title="Copy"] { align-items: center; } -body.DocSearch--active #__docusaurus > div[class*="announcementBar"] { +body.DocSearch--active #__docusaurus > div[class*='announcementBar'] { /* hide the announcement bar while the search bar called out */ display: none; } @@ -764,20 +773,21 @@ a.card p.text--truncate { .horizontal-bar { margin-top: 15px; - padding: var(--ifm-menu-link-padding-vertical) var(--ifm-menu-link-padding-horizontal); - font-family: "Open Sans", sans-serif; + padding: var(--ifm-menu-link-padding-vertical) + var(--ifm-menu-link-padding-horizontal); + font-family: 'Open Sans', sans-serif; font-size: 12px; font-weight: 600; } -html[data-theme="light"] .horizontal-bar { +html[data-theme='light'] .horizontal-bar { border-top: 1px solid #e0e0e0; border-right: 1px solid #e0e0e0; background-color: #effbff; color: #b0b1c1; } -html[data-theme="dark"] .horizontal-bar { +html[data-theme='dark'] .horizontal-bar { border-top: 1px solid #45494f; border-right: 1px solid #45494f; background-color: #002531; @@ -853,7 +863,6 @@ figure p { .searchBar .DocSearch-Button { width: 100%; } - } @media screen and (max-width: 1199px) { @@ -864,14 +873,14 @@ figure p { align-items: center; } /* for tablet */ - .navbar__item img[alt="BETA"] { + .navbar__item img[alt='BETA'] { display: none; } .searchBar { display: none; } - - .navbar__logo{ + + .navbar__logo { width: 200px; } } @@ -892,7 +901,7 @@ figure p { #search-button { color: var(--ifm-heading-color); - font-size: 24px; + font-size: 
16px; position: absolute; right: 0; margin-right: 20px; @@ -901,6 +910,10 @@ figure p { display: none; } +.main-nav-coveo { + display: block; +} + /*== start of code for tooltips ==*/ .tool { @@ -931,11 +944,11 @@ figure p { border-radius: 4px; padding: 10px; color: #ffffff; - z-index: 1; + z-index: 100; } [hover-tooltip]:hover::after { - content: ""; + content: ''; position: absolute; display: block; left: 50%; @@ -947,15 +960,15 @@ figure p { border-color: var(--tooltip-bg-color) transparent transparent transparent; border-width: var(--caret-height) var(--caret-width) 0; animation: fade-in 300ms ease; - z-index: 1; + z-index: 100; } -[hover-tooltip][tooltip-position="bottom"]:hover::before { +[hover-tooltip][tooltip-position='bottom']:hover::before { bottom: auto; top: calc(100% + var(--distance)); } -[hover-tooltip][tooltip-position="bottom"]:hover::after { +[hover-tooltip][tooltip-position='bottom']:hover::after { bottom: auto; top: calc(100% + calc(var(--distance) - var(--caret-height))); border-color: transparent transparent var(--tooltip-bg-color); @@ -973,6 +986,9 @@ figure p { /*== end of code for tooltips ==*/ @media screen and (max-width: 996px) { + .main-nav-coveo { + display: none; + } #search-button { display: block; } @@ -983,7 +999,7 @@ figure p { /******************* chatbot css variables *************/ -[data-theme="dark"] { +[data-theme='dark'] { --main-background-color: #333137; --chatbot-input-background-color: #35343b; @@ -1020,13 +1036,13 @@ figure p { rgba(51, 49, 55, 0.7) 85.94% ); --input-focus: 1px solid white; - --option-img: url("https://developer.harness.io/img/BarsDark.svg") no-repeat; - --send-img: url("https://developer.harness.io/img/SendIconDark.svg") no-repeat; + --option-img: url('https://developer.harness.io/img/BarsDark.svg') no-repeat; + --send-img: url('https://developer.harness.io/img/SendIconDark.svg') no-repeat; --dropdown-bg: #ede8fb; } -[data-theme="light"] { +[data-theme='light'] { --main-background-color: #faf8ff; --chatbot-input-background-color: #ede8fb; @@ -1053,168 +1069,183 @@ figure p { --aida-btn-border: 1px solid #9681a7; --input-focus: 1px solid #7e5be0; - --dropdown-bg: #7E5BE0; - --option-img: url("https://developer.harness.io/img/Bars.svg") no-repeat; - --send-img: url("https://developer.harness.io/img/SendIcon.svg") no-repeat; + --dropdown-bg: #7e5be0; + --option-img: url('https://developer.harness.io/img/Bars.svg') no-repeat; + --send-img: url('https://developer.harness.io/img/SendIcon.svg') no-repeat; } /******************* chatbot css variables end *************/ /*******************sidebar logo css start*******************/ - .menu__list-item-collapsible > a,.menu__link{ +.menu__list-item-collapsible > a, +.menu__link { height: 52px; - } - .menu { +} +.menu { margin-bottom: 0px !important; - } -.collapseSidebarButton_PEFL{ +} +.collapseSidebarButton_PEFL { width: 100% !important; } -.sidebar-platform > div > a::before,.sidebar-platform > a::before{ - content: " "; +.sidebar-platform > div > a::before, +.sidebar-platform > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/logo.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/logo.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-cr > div > a::before,.sidebar-cr > a::before{ - content: " "; +.sidebar-cr > div > a::before, +.sidebar-cr > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - 
background-image: url("/img/icon_code.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_code.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-ci > div > a::before,.sidebar-ci > a::before{ - content: " "; +.sidebar-ci > div > a::before, +.sidebar-ci > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_ci.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_ci.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-ar > div > a::before,.sidebar-ar > a::before{ - content: " "; +.sidebar-ar > div > a::before, +.sidebar-ar > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_artifact_registry.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_artifact_registry.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-cd > div > a::before,.sidebar-cd > a::before{ - content: " "; +.sidebar-cd > div > a::before, +.sidebar-cd > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_cd.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_cd.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-cde > div > a::before,.sidebar-cde > a::before{ - content: " "; +.sidebar-cde > div > a::before, +.sidebar-cde > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/cde_icon.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/cde_icon.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-iacm > div > a::before,.sidebar-iacm > a::before{ - content: " "; +.sidebar-iacm > div > a::before, +.sidebar-iacm > a::before { + content: ' '; display: inline-block; min-height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_iacm.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_iacm.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-ff > div > a::before,.sidebar-ff > a::before{ - content: " "; +.sidebar-ff > div > a::before, +.sidebar-ff > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_ff.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_ff.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-fme > div > a::before,.sidebar-fme > a::before{ - content: " "; +.sidebar-fme > div > a::before, +.sidebar-fme > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_fme.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_fme.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-dbdevops > div > a::before,.sidebar-dbdevops > a::before{ - content: " "; +.sidebar-dbdevops > div > a::before, +.sidebar-dbdevops > a::before { + content: ' '; display: inline-block; min-height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_dbdevops.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_dbdevops.svg'); background-size: 
contain; background-repeat: no-repeat; } -.sidebar-ccm > div > a::before,.sidebar-ccm > a::before{ - content: " "; +.sidebar-ccm > div > a::before, +.sidebar-ccm > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_ccm.svg"); + margin-right: 16px; + background-image: url('/img/icon_ccm.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-sto > div > a::before,.sidebar-sto > a::before{ - content: " "; +.sidebar-sto > div > a::before, +.sidebar-sto > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_sto.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_sto.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-ssca > div > a::before,.sidebar-ssca > a::before{ - content: " "; +.sidebar-ssca > div > a::before, +.sidebar-ssca > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_ssca.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_ssca.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-ce > div > a::before,.sidebar-ce > a::before{ - content: " "; +.sidebar-ce > div > a::before, +.sidebar-ce > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_ce.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_ce.svg'); background-size: contain; background-repeat: no-repeat; } - -.sidebar-ir > div > a::before,.sidebar-ir > a::before{ +.sidebar-ir > div > a::before, +.sidebar-ir > a::before { content: " "; display: inline-block; height: 24px; @@ -1224,152 +1255,167 @@ figure p { background-size: contain; background-repeat: no-repeat; } -.sidebar-srm > div > a::before,.sidebar-srm > a::before{ +.sidebar-srm > div > a::before, +.sidebar-srm > a::before { content: " "; display: inline-block; height: 24px; - min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_srm.svg"); + min-width: 24px; + margin-right: 16px; + background-image: url('/img/icon_srm.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-cet > div > a::before,.sidebar-cet > a::before{ - content: " "; +.sidebar-cet > div > a::before, +.sidebar-cet > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; - margin-right:16px; - background-image: url("/img/icon_cet.svg"); + margin-right: 16px; + background-image: url('/img/icon_cet.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-idp > div > a::before,.sidebar-idp > a::before{ - content: " "; +.sidebar-idp > div > a::before, +.sidebar-idp > a::before { + content: ' '; display: inline-block; height: 24px; - min-width: 24px; + min-width: 24px; margin-right: 16px; - background-image: url("/img/icon_idp.svg"); + background-image: url('/img/icon_idp.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-sei > div > a::before,.sidebar-sei > a::before{ - content: " "; +.sidebar-sei > div > a::before, +.sidebar-sei > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/icon_sei.svg"); + background-image: url('/img/icon_sei.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-smp > div > 
a::before,.sidebar-smp> a::before{ - content: " "; +.sidebar-smp > div > a::before, +.sidebar-smp > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/logo.svg"); + background-image: url('/img/logo.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-harness-firstGen > div > a::before,.sidebar-harness-firstGen> a::before{ - content: " "; +.sidebar-harness-firstGen > div > a::before, +.sidebar-harness-firstGen > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/Harness_FirstGen_Logo.svg"); + background-image: url('/img/Harness_FirstGen_Logo.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-troubleshooting > div > a::before,.sidebar-troubleshooting> a::before{ - content: " "; +.sidebar-troubleshooting > div > a::before, +.sidebar-troubleshooting > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/Troubleshooting_Logo.svg"); + background-image: url('/img/Troubleshooting_Logo.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-API_Reference > div > a::before,.sidebar-API_Reference> a::before{ - content: " "; +.sidebar-API_Reference > div > a::before, +.sidebar-API_Reference > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/API_Reference_Logo.svg"); + background-image: url('/img/API_Reference_Logo.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-Release_Notes > div > a::before,.sidebar-Release_Notes> a::before{ - content: " "; +.sidebar-Release_Notes > div > a::before, +.sidebar-Release_Notes > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/Release_Notes_Logo.svg"); + background-image: url('/img/Release_Notes_Logo.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-Cloud_Operations > div > a::before,.sidebar-Cloud_Operations> a::before{ - content: " "; +.sidebar-Cloud_Operations > div > a::before, +.sidebar-Cloud_Operations > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/Cloud_Operations_Logo.svg"); + background-image: url('/img/Cloud_Operations_Logo.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-faqs > div > a::before,.sidebar-faqs> a::before{ - content: " "; +.sidebar-faqs > div > a::before, +.sidebar-faqs > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/faqs_Logo.svg"); + background-image: url('/img/faqs_Logo.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-all_docs > div > a::before,.sidebar-all_docs> a::before{ - content: " "; +.sidebar-all_docs > div > a::before, +.sidebar-all_docs > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/All_Docs_Logo.svg"); + background-image: url('/img/All_Docs_Logo.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-armory > div > a::before,.sidebar-armory> a::before{ - content: " "; +.sidebar-armory > div > a::before, +.sidebar-armory > a::before { + content: ' '; display: inline-block; height: 24px; 
min-width: 24px; margin-right: 16px; - background-image: url("/img/armory.png"); + background-image: url('/img/armory.png'); background-size: contain; background-repeat: no-repeat; } -.sidebar-reference-arch > div > a::before,.sidebar-reference-arch> a::before{ - content: " "; +.sidebar-reference-arch > div > a::before, +.sidebar-reference-arch > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/reference_architectures_icon.svg"); + background-image: url('/img/reference_architectures_icon.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-opensource > div > a::before,.sidebar-opensource> a::before{ - content: " "; +.sidebar-opensource > div > a::before, +.sidebar-opensource > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; @@ -1377,85 +1423,90 @@ figure p { background-size: contain; background-repeat: no-repeat; } -html[data-theme="light"] .sidebar-opensource > div > a::before, -html[data-theme="light"] .sidebar-opensource > a::before { - background-image: url("/img/icon_platform.svg"); +html[data-theme='light'] .sidebar-opensource > div > a::before, +html[data-theme='light'] .sidebar-opensource > a::before { + background-image: url('/img/icon_platform.svg'); } -html[data-theme="dark"] .sidebar-opensource > div > a::before, -html[data-theme="dark"] .sidebar-opensource > a::before { - background-image: url("/img/icon_platform.svg"); +html[data-theme='dark'] .sidebar-opensource > div > a::before, +html[data-theme='dark'] .sidebar-opensource > a::before { + background-image: url('/img/icon_platform.svg'); } -.sidebar-roadmap > div > a::before,.sidebar-roadmap> a::before{ - content: " "; +.sidebar-roadmap > div > a::before, +.sidebar-roadmap > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/roadmap_icon.svg"); + background-image: url('/img/roadmap_icon.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-ilt-instructions > div > a::before,.sidebar-ilt-instructions> a::before{ - content: " "; +.sidebar-ilt-instructions > div > a::before, +.sidebar-ilt-instructions > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/ilt_instructions_icon.svg"); + background-image: url('/img/ilt_instructions_icon.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-vilt-cal > div > a::before,.sidebar-vilt-cal> a::before{ - content: " "; +.sidebar-vilt-cal > div > a::before, +.sidebar-vilt-cal > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/virtual_instructor_led_calendar_icon.svg"); + background-image: url('/img/virtual_instructor_led_calendar_icon.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-back > div > a::before,.sidebar-back> a::before{ - content: " "; +.sidebar-back > div > a::before, +.sidebar-back > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/back_icon.svg"); + background-image: url('/img/back_icon.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-allkb > div > a::before,.sidebar-allkb> a::before{ - content: " "; +.sidebar-allkb > div > a::before, +.sidebar-allkb > a::before { + content: ' '; display: inline-block; height: 24px; 
min-width: 24px; margin-right: 16px; - background-image: url("/img/show_all_kb_icon.svg"); + background-image: url('/img/show_all_kb_icon.svg'); background-size: contain; background-repeat: no-repeat; } -.sidebar-univ > div > a::before,.sidebar-univ> a::before{ - content: " "; +.sidebar-univ > div > a::before, +.sidebar-univ > a::before { + content: ' '; display: inline-block; height: 24px; min-width: 24px; margin-right: 16px; - background-image: url("/img/university_icon.svg"); + background-image: url('/img/university_icon.svg'); background-size: contain; background-repeat: no-repeat; } /*******************sidebar logo css end*******************/ - .continuous-delivery--gitops:hover { border-color: var(--mod-cd-200) !important; } @@ -1505,21 +1556,17 @@ html[data-theme="dark"] .sidebar-opensource > a::before { border-color: var(--mod-opensource-200) !important; } - - - /*********************** navbar css ***********/ - -.navbar__items{ +.navbar__items { font-size: 16px; } -.navbar__item{ +.navbar__item { padding: 0 8px; } @media screen and (max-width: 1299px) { - .navbar__items{ + .navbar__items { font-size: 15px; } } diff --git a/src/theme/NavbarItem/ComponentTypes.js b/src/theme/NavbarItem/ComponentTypes.js index 1775aa040c0..96176f234ea 100644 --- a/src/theme/NavbarItem/ComponentTypes.js +++ b/src/theme/NavbarItem/ComponentTypes.js @@ -1,8 +1,8 @@ -import ComponentTypes from "@theme-original/NavbarItem/ComponentTypes"; -import CoveoSearch from "@site/src/components/NavbarItems/CoveoSearch"; +import ComponentTypes from '@theme-original/NavbarItem/ComponentTypes'; +import CoveoSearch from '@site/src/components/NavbarItems'; export default { ...ComponentTypes, // add CoveoSearch as a navbar item - "custom-coveo-search": CoveoSearch, + 'custom-coveo-search': CoveoSearch, }; diff --git a/yarn.lock b/yarn.lock index 4e247693e00..2783062d35c 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1240,6 +1240,52 @@ resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9" integrity sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ== +"@coveo/bueno@0.46.4": + version "0.46.4" + resolved "https://registry.yarnpkg.com/@coveo/bueno/-/bueno-0.46.4.tgz#c3eb1baf7a72ed3603819382c9c0794eac859424" + integrity sha512-rZwDhCMKLRDDvhA+mFpj4xFZfjpSJsl4cU3ZhO8rLVama44tW0ttXLQDLwaBv5P4tnRgUPinTYl/UxlO59fe7w== + +"@coveo/explorer-messenger@^0.4.0": + version "0.4.0" + resolved "https://registry.yarnpkg.com/@coveo/explorer-messenger/-/explorer-messenger-0.4.0.tgz#63ab8ec07272e048be42325c22fee38d4de2919f" + integrity sha512-nVxwn+4Z+SZe6K94evxPaIeJWJq7hATNaUrTKznnauVptFMYcujts/nnhMplddm9ePg7NgZQjvcHwbi5kqw/EA== + +"@coveo/headless@2.80.5": + version "2.80.5" + resolved "https://registry.yarnpkg.com/@coveo/headless/-/headless-2.80.5.tgz#16f104c593561223baf398c03f3e1077a6902931" + integrity sha512-/cep91y/YGhoRbEvRe+s0xHtMRQA5KEYYuheppFU0nTWUl6d1eRUnhSAgXiUzZf30eCNdwvYgOs8J/RWav/PQw== + dependencies: + "@coveo/bueno" "0.46.4" + "@coveo/relay" "0.7.10" + "@coveo/relay-event-types" "9.4.0" + "@microsoft/fetch-event-source" "2.0.1" + "@reduxjs/toolkit" "2.2.7" + abab "2.0.6" + abortcontroller-polyfill "1.7.5" + coveo.analytics "2.30.38" + dayjs "1.11.12" + exponential-backoff "3.1.0" + fast-equals "5.0.1" + navigator.sendbeacon "0.0.20" + node-abort-controller "^3.0.0" + pino "8.21.0" + redux-thunk "3.1.0" + ts-debounce "4.0.0" + undici "5.28.4" + +"@coveo/relay-event-types@9.4.0": + version "9.4.0" + resolved 
"https://registry.yarnpkg.com/@coveo/relay-event-types/-/relay-event-types-9.4.0.tgz#3e24a6cf313811b34fad31351a2f191f77cd1fb2" + integrity sha512-Fsm3fUWj5ofbYB0Bz4j97LXwsX6MRxYA7NX4DOF7BrXW6GlSvJCH5lR7EModMsi9OTy9ee6pQ2A2xvXkWMBmGg== + +"@coveo/relay@0.7.10": + version "0.7.10" + resolved "https://registry.yarnpkg.com/@coveo/relay/-/relay-0.7.10.tgz#29975370e8b270dec4cb10d91cb72d77b6e768fc" + integrity sha512-d/4Vf8wwj746M0RV9xAek7SV/rZhv3ERoQoZo6I2IKAqzMxX8r0Vyrau+UZG1sdxybd6zzo8AVIzvRFHGXxvnA== + dependencies: + "@coveo/explorer-messenger" "^0.4.0" + uuid "^9.0.1" + "@csstools/cascade-layer-name-parser@^2.0.4": version "2.0.4" resolved "https://registry.yarnpkg.com/@csstools/cascade-layer-name-parser/-/cascade-layer-name-parser-2.0.4.tgz#64d128529397aa1e1c986f685713363b262b81b1" @@ -2106,6 +2152,11 @@ resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.57.1.tgz#de633db3ec2ef6a3c89e2f19038063e8a122e2c2" integrity sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q== +"@fastify/busboy@^2.0.0": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@fastify/busboy/-/busboy-2.1.1.tgz#b9da6a878a371829a0502c9b6c1c143ef6663f4d" + integrity sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA== + "@hapi/hoek@^9.0.0", "@hapi/hoek@^9.3.0": version "9.3.0" resolved "https://registry.yarnpkg.com/@hapi/hoek/-/hoek-9.3.0.tgz#8368869dcb735be2e7f5cb7647de78e167a251fb" @@ -2275,6 +2326,11 @@ dependencies: langium "3.0.0" +"@microsoft/fetch-event-source@2.0.1": + version "2.0.1" + resolved "https://registry.yarnpkg.com/@microsoft/fetch-event-source/-/fetch-event-source-2.0.1.tgz#9ceecc94b49fbaa15666e38ae8587f64acce007d" + integrity sha512-W6CLUJ2eBMw3Rec70qrsEW0jOm/3twwJv21mrmj2yORiaVmVYGS4sSS5yUwvQc1ZlDLYGPnClVWmUUMagKNsfA== + "@module-federation/runtime-tools@0.5.1": version "0.5.1" resolved "https://registry.yarnpkg.com/@module-federation/runtime-tools/-/runtime-tools-0.5.1.tgz#1b1f93837159a6bf0c0ba78730d589a5a8f74aa3" @@ -2480,6 +2536,16 @@ rc-resize-observer "^1.3.1" rc-util "^5.38.0" +"@reduxjs/toolkit@2.2.7": + version "2.2.7" + resolved "https://registry.yarnpkg.com/@reduxjs/toolkit/-/toolkit-2.2.7.tgz#199e3d10ccb39267cb5aee92c0262fd9da7fdfb2" + integrity sha512-faI3cZbSdFb8yv9dhDTmGwclW0vk0z5o1cia+kf7gCbaCwHI5e+7tP57mJUv22pNcNbeA62GSrPpfrUfdXcQ6g== + dependencies: + immer "^10.0.3" + redux "^5.0.1" + redux-thunk "^3.1.0" + reselect "^5.1.0" + "@remix-run/router@1.21.0": version "1.21.0" resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.21.0.tgz#c65ae4262bdcfe415dbd4f64ec87676e4a56e2b5" @@ -3490,6 +3556,11 @@ resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.11.tgz#11af57b127e32487774841f7a4e54eab166d03c4" integrity sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA== +"@types/uuid@^9.0.0": + version "9.0.8" + resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.8.tgz#7545ba4fc3c003d6c756f651f3bf163d8f0f29ba" + integrity sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA== + "@types/ws@^8.5.5": version "8.5.13" resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.13.tgz#6414c280875e2691d0d1e080b05addbf5cb91e20" @@ -3645,6 +3716,23 @@ resolved "https://registry.yarnpkg.com/@xtuc/long/-/long-4.2.2.tgz#d291c6a4e97989b5c61d9acf396ae4fe133a718d" integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ== +abab@2.0.6: + version "2.0.6" + 
resolved "https://registry.yarnpkg.com/abab/-/abab-2.0.6.tgz#41b80f2c871d19686216b82309231cfd3cb3d291" + integrity sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA== + +abort-controller@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392" + integrity sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg== + dependencies: + event-target-shim "^5.0.0" + +abortcontroller-polyfill@1.7.5: + version "1.7.5" + resolved "https://registry.yarnpkg.com/abortcontroller-polyfill/-/abortcontroller-polyfill-1.7.5.tgz#6738495f4e901fbb57b6c0611d0c75f76c485bed" + integrity sha512-JMJ5soJWP18htbbxJjG7bG6yuI6pRhgJ0scHHTfkUjf6wjP912xZWvM+A4sJK3gqd9E8fcPbDnOefbA9Th/FIQ== + accepts@~1.3.4, accepts@~1.3.8: version "1.3.8" resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.8.tgz#0bf0be125b67014adcb0b0921e62db7bffe16b2e" @@ -3932,6 +4020,11 @@ at-least-node@^1.0.0: resolved "https://registry.yarnpkg.com/at-least-node/-/at-least-node-1.0.0.tgz#602cd4b46e844ad4effc92a8011a3c46e0238dc2" integrity sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg== +atomic-sleep@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/atomic-sleep/-/atomic-sleep-1.0.0.tgz#eb85b77a601fc932cfe432c5acd364a9e2c9075b" + integrity sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ== + atomically@^2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/atomically/-/atomically-2.0.3.tgz#27e47bbe39994d324918491ba7c0edb7783e56cb" @@ -4008,6 +4101,11 @@ balanced-match@^1.0.0: resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== +base64-js@^1.3.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" + integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== + batch@0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/batch/-/batch-0.6.1.tgz#dc34314f4e679318093fc760272525f94bf25c16" @@ -4126,6 +4224,14 @@ buffer-from@^1.0.0: resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== +buffer@^6.0.3: + version "6.0.3" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-6.0.3.tgz#2ace578459cc8fbe2a70aaa8f52ee63b6a74c6c6" + integrity sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA== + dependencies: + base64-js "^1.3.1" + ieee754 "^1.2.1" + bytes@3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048" @@ -4614,6 +4720,23 @@ cosmiconfig@^8.1.3, cosmiconfig@^8.3.5: parse-json "^5.2.0" path-type "^4.0.0" +coveo.analytics@2.30.38: + version "2.30.38" + resolved "https://registry.yarnpkg.com/coveo.analytics/-/coveo.analytics-2.30.38.tgz#9f4897462046766a61c6fac0213062829f74fcf0" + integrity sha512-CxiBWV7XxDNAyCWS7gwikHjJYz8NigYVHSkGU23JkcQf2oK0XJEsxcU/eRN3VRBfLLmhBTRZbpWJ1SW2imDovQ== + dependencies: + "@types/uuid" "^9.0.0" + cross-fetch "^3.1.5" + 
react-native-get-random-values "^1.11.0" + uuid "^9.0.0" + +cross-fetch@^3.1.5: + version "3.2.0" + resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.2.0.tgz#34e9192f53bc757d6614304d9e5e6fb4edb782e3" + integrity sha512-Q+xVJLoGOeIMXZmbUK4HYk+69cQH6LudR0Vu/pRm2YlU/hDV9CiS0gKUMaWY5f2NeUH9C1nV3bsTlCo0FsTV1Q== + dependencies: + node-fetch "^2.7.0" + cross-spawn@^7.0.2, cross-spawn@^7.0.3: version "7.0.6" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" @@ -5134,6 +5257,11 @@ data-view-byte-offset@^1.0.0: es-errors "^1.3.0" is-data-view "^1.0.1" +dayjs@1.11.12: + version "1.11.12" + resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.12.tgz#5245226cc7f40a15bf52e0b99fd2a04669ccac1d" + integrity sha512-Rt2g+nTbLlDWZTwwrIXjy9MeiZmSDI375FvZs72ngxx8PDC6YXOeR3q5LAuPzjZQxhiWdRKac7RKV+YyQYfYIg== + dayjs@^1.11.10: version "1.11.13" resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.13.tgz#92430b0139055c3ebb60150aa13e860a4b5a366c" @@ -5905,12 +6033,17 @@ eval@^0.1.8: "@types/node" "*" require-like ">= 0.1.1" +event-target-shim@^5.0.0: + version "5.0.1" + resolved "https://registry.yarnpkg.com/event-target-shim/-/event-target-shim-5.0.1.tgz#5d4d3ebdf9583d63a5333ce2deb7480ab2b05789" + integrity sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ== + eventemitter3@^4.0.0: version "4.0.7" resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f" integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== -events@^3.2.0: +events@^3.2.0, events@^3.3.0: version "3.3.0" resolved "https://registry.yarnpkg.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400" integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q== @@ -5935,6 +6068,11 @@ exenv@^1.2.0: resolved "https://registry.yarnpkg.com/exenv/-/exenv-1.2.2.tgz#2ae78e85d9894158670b03d47bec1f03bd91bb9d" integrity sha512-Z+ktTxTwv9ILfgKCk32OX3n/doe+OcLTRtqK9pcL+JsP3J1/VW8Uvl4ZjLlKqeW4rzK4oesDOGMEMRIZqtP4Iw== +exponential-backoff@3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/exponential-backoff/-/exponential-backoff-3.1.0.tgz#9409c7e579131f8bd4b32d7d8094a911040f2e68" + integrity sha512-oBuz5SYz5zzyuHINoe9ooePwSu0xApKWgeNzok4hZ5YKXFh9zrQBEM15CXqoZkJJPuI2ArvqjPQd8UKJA753XA== + express@^4.17.3: version "4.21.1" resolved "https://registry.yarnpkg.com/express/-/express-4.21.1.tgz#9dae5dda832f16b4eec941a4e44aa89ec481b281" @@ -5984,11 +6122,21 @@ extend@^3.0.0: resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== +fast-base64-decode@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fast-base64-decode/-/fast-base64-decode-1.0.0.tgz#b434a0dd7d92b12b43f26819300d2dafb83ee418" + integrity sha512-qwaScUgUGBYeDNRnbc/KyllVU88Jk1pRHPStuF/lO7B0/RTRLj7U0lkdTAutlBblY08rwZDff6tNU9cjv6j//Q== + fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: version "3.1.3" resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== +fast-equals@5.0.1: + version "5.0.1" + resolved 
"https://registry.yarnpkg.com/fast-equals/-/fast-equals-5.0.1.tgz#a4eefe3c5d1c0d021aeed0bc10ba5e0c12ee405d" + integrity sha512-WF1Wi8PwwSY7/6Kx0vKXtw8RwuSGoM1bvDaJbu7MxDlR1vovZjIAKrnzyrThgAjm6JDTu0fVgWXDlMGspodfoQ== + fast-glob@^3.2.11, fast-glob@^3.2.9, fast-glob@^3.3.0: version "3.3.2" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129" @@ -6010,6 +6158,11 @@ fast-levenshtein@^2.0.6: resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== +fast-redact@^3.1.1: + version "3.5.0" + resolved "https://registry.yarnpkg.com/fast-redact/-/fast-redact-3.5.0.tgz#e9ea02f7e57d0cd8438180083e93077e496285e4" + integrity sha512-dwsoQlS7h9hMeYUq1W++23NDcBLV4KqONnITDV9DjfS3q1SgDGVrBdvvTLUotWtPSD7asWDV9/CmsZPy8Hf70A== + fast-uri@^3.0.1: version "3.0.3" resolved "https://registry.yarnpkg.com/fast-uri/-/fast-uri-3.0.3.tgz#892a1c91802d5d7860de728f18608a0573142241" @@ -6877,6 +7030,11 @@ icss-utils@^5.0.0, icss-utils@^5.1.0: resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-5.1.0.tgz#c6be6858abd013d768e98366ae47e25d5887b1ae" integrity sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA== +ieee754@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" + integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== + ignore@^5.2.0, ignore@^5.2.4: version "5.3.2" resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5" @@ -6889,6 +7047,11 @@ image-size@^1.0.2: dependencies: queue "6.0.2" +immer@^10.0.3: + version "10.1.1" + resolved "https://registry.yarnpkg.com/immer/-/immer-10.1.1.tgz#206f344ea372d8ea176891545ee53ccc062db7bc" + integrity sha512-s2MPrmjovJcoMaHtx6K11Ra7oD05NT97w1IC5zpMkT6Atjr7H8LjaDd81iIxUYpMKSRRNMJE703M1Fhr/TctHw== + immer@^9.0.7: version "9.0.21" resolved "https://registry.yarnpkg.com/immer/-/immer-9.0.21.tgz#1e025ea31a40f24fb064f1fef23e931496330176" @@ -8672,6 +8835,11 @@ natural-compare@^1.4.0: resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" integrity sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw== +navigator.sendbeacon@0.0.20: + version "0.0.20" + resolved "https://registry.yarnpkg.com/navigator.sendbeacon/-/navigator.sendbeacon-0.0.20.tgz#d9e31acb2617b6190bfdb825491a64566f421391" + integrity sha512-a2fmNpIMnA9pN8Jv9f4FQZaakEWwdln+YBPydawxnYo6vzlUvYLlRUnPagF++29+rdgVpAtK6hXVSmTgkRgRJw== + negotiator@0.6.3: version "0.6.3" resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.3.tgz#58e323a72fedc0d6f9cd4d31fe49f51479590ccd" @@ -8702,6 +8870,11 @@ no-case@^3.0.4: lower-case "^2.0.2" tslib "^2.0.3" +node-abort-controller@^3.0.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/node-abort-controller/-/node-abort-controller-3.1.1.tgz#a94377e964a9a37ac3976d848cb5c765833b8548" + integrity sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ== + node-addon-api@^7.0.0: version "7.1.1" resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-7.1.1.tgz#1aba6693b0f255258a049d621329329322aad558" @@ -8717,7 +8890,7 @@ node-emoji@^2.1.0: emojilib 
"^2.4.0" skin-tone "^2.0.0" -node-fetch@^2.6.7: +node-fetch@^2.6.7, node-fetch@^2.7.0: version "2.7.0" resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d" integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A== @@ -8839,6 +9012,11 @@ obuf@^1.0.0, obuf@^1.1.2: resolved "https://registry.yarnpkg.com/obuf/-/obuf-1.1.2.tgz#09bea3343d41859ebd446292d11c9d4db619084e" integrity sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg== +on-exit-leak-free@^2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/on-exit-leak-free/-/on-exit-leak-free-2.1.2.tgz#fed195c9ebddb7d9e4c3842f93f281ac8dadd3b8" + integrity sha512-0eJJY6hXLGf1udHwfNftBqH+g73EU4B504nZeKpz1sYRKafAghwxEJunB2O7rDZkL4PGfsMVnTXZ2EjibbqcsA== + on-finished@2.4.1: version "2.4.1" resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.4.1.tgz#58c8c44116e54845ad57f14ab10b03533184ac3f" @@ -9132,6 +9310,36 @@ picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3, picomatch@^2.3.1: resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== +pino-abstract-transport@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/pino-abstract-transport/-/pino-abstract-transport-1.2.0.tgz#97f9f2631931e242da531b5c66d3079c12c9d1b5" + integrity sha512-Guhh8EZfPCfH+PMXAb6rKOjGQEoy0xlAIn+irODG5kgfYV+BQ0rGYYWTIel3P5mmyXqkYkPmdIkywsn6QKUR1Q== + dependencies: + readable-stream "^4.0.0" + split2 "^4.0.0" + +pino-std-serializers@^6.0.0: + version "6.2.2" + resolved "https://registry.yarnpkg.com/pino-std-serializers/-/pino-std-serializers-6.2.2.tgz#d9a9b5f2b9a402486a5fc4db0a737570a860aab3" + integrity sha512-cHjPPsE+vhj/tnhCy/wiMh3M3z3h/j15zHQX+S9GkTBgqJuTuJzYJ4gUyACLhDaJ7kk9ba9iRDmbH2tJU03OiA== + +pino@8.21.0: + version "8.21.0" + resolved "https://registry.yarnpkg.com/pino/-/pino-8.21.0.tgz#e1207f3675a2722940d62da79a7a55a98409f00d" + integrity sha512-ip4qdzjkAyDDZklUaZkcRFb2iA118H9SgRh8yzTkSQK8HilsOJF7rSY8HoW5+I0M46AZgX/pxbprf2vvzQCE0Q== + dependencies: + atomic-sleep "^1.0.0" + fast-redact "^3.1.1" + on-exit-leak-free "^2.1.0" + pino-abstract-transport "^1.2.0" + pino-std-serializers "^6.0.0" + process-warning "^3.0.0" + quick-format-unescaped "^4.0.3" + real-require "^0.2.0" + safe-stable-stringify "^2.3.1" + sonic-boom "^3.7.0" + thread-stream "^2.6.0" + pkg-dir@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-7.0.0.tgz#8f0c08d6df4476756c5ff29b3282d0bab7517d11" @@ -9765,6 +9973,16 @@ process-nextick-args@~2.0.0: resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== +process-warning@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/process-warning/-/process-warning-3.0.0.tgz#96e5b88884187a1dce6f5c3166d611132058710b" + integrity sha512-mqn0kFRl0EoqhnL0GQ0veqFHyIN1yig9RHh/InzORTUiZHFRAur+aMtRkELNwGs9aNwKS6tg/An4NYBPGwvtzQ== + +process@^0.11.10: + version "0.11.10" + resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + integrity sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A== + 
prompts@^2.4.2: version "2.4.2" resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.2.tgz#7b57e73b3a48029ad10ebd44f74b01722a4cb069" @@ -9831,6 +10049,11 @@ queue@6.0.2: dependencies: inherits "~2.0.3" +quick-format-unescaped@^4.0.3: + version "4.0.4" + resolved "https://registry.yarnpkg.com/quick-format-unescaped/-/quick-format-unescaped-4.0.4.tgz#93ef6dd8d3453cbc7970dd614fad4c5954d6b5a7" + integrity sha512-tYC1Q1hgyRuHgloV/YXs2w15unPVh8qfu/qCTfhTYamaw7fyhumKa2yGpdSo87vY32rIclj+4fWYQXUMs9EHvg== + quick-lru@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/quick-lru/-/quick-lru-5.1.1.tgz#366493e6b3e42a3a6885e2e99d18f80fb7a8c932" @@ -10037,6 +10260,13 @@ react-modal@^3.16.1: react-lifecycles-compat "^3.0.0" warning "^4.0.3" +react-native-get-random-values@^1.11.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/react-native-get-random-values/-/react-native-get-random-values-1.11.0.tgz#1ca70d1271f4b08af92958803b89dccbda78728d" + integrity sha512-4BTbDbRmS7iPdhYLRcz3PGFIpFJBwNZg9g42iwa2P6FOv9vZj/xJc678RZXnLNZzd0qd7Q3CCF6Yd+CU2eoXKQ== + dependencies: + fast-base64-decode "^1.0.0" + react-router-config@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/react-router-config/-/react-router-config-5.1.1.tgz#0f4263d1a80c6b2dc7b9c1902c9526478194a988" @@ -10116,6 +10346,17 @@ readable-stream@^3.0.6: string_decoder "^1.1.1" util-deprecate "^1.0.1" +readable-stream@^4.0.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-4.6.0.tgz#ce412dfb19c04efde1c5936d99c27f37a1ff94c9" + integrity sha512-cbAdYt0VcnpN2Bekq7PU+k363ZRsPwJoEEJOEtSJQlJXzwaxt3FIo/uL+KeDSGIjJqtkwyge4KQgD2S2kd+CQw== + dependencies: + abort-controller "^3.0.0" + buffer "^6.0.3" + events "^3.3.0" + process "^0.11.10" + string_decoder "^1.3.0" + readdirp@^4.0.1: version "4.0.2" resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-4.0.2.tgz#388fccb8b75665da3abffe2d8f8ed59fe74c230a" @@ -10133,6 +10374,11 @@ reading-time@^1.5.0: resolved "https://registry.yarnpkg.com/reading-time/-/reading-time-1.5.0.tgz#d2a7f1b6057cb2e169beaf87113cc3411b5bc5bb" integrity sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg== +real-require@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/real-require/-/real-require-0.2.0.tgz#209632dea1810be2ae063a6ac084fee7e33fba78" + integrity sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg== + rechoir@^0.6.2: version "0.6.2" resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.6.2.tgz#85204b54dba82d5742e28c96756ef43af50e3384" @@ -10187,6 +10433,16 @@ recursive-readdir@^2.2.2: dependencies: minimatch "^3.0.5" +redux-thunk@3.1.0, redux-thunk@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/redux-thunk/-/redux-thunk-3.1.0.tgz#94aa6e04977c30e14e892eae84978c1af6058ff3" + integrity sha512-NW2r5T6ksUKXCabzhL9z+h206HQw/NJkcLm1GPImRQ8IzfXwRGqjVhKJGauHirT0DAuyy6hjdnMZaRoAcy0Klw== + +redux@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/redux/-/redux-5.0.1.tgz#97fa26881ce5746500125585d5642c77b6e9447b" + integrity sha512-M9/ELqF6fy8FwmkpnF0S3YKOqMyoWJ4+CS5Efg2ct3oY9daQvd/Pc71FpGZsVsbl3Cpb+IIcjBDUnnyBdQbq4w== + reflect.getprototypeof@^1.0.4, reflect.getprototypeof@^1.0.6: version "1.0.7" resolved "https://registry.yarnpkg.com/reflect.getprototypeof/-/reflect.getprototypeof-1.0.7.tgz#04311b33a1b713ca5eb7b5aed9950a86481858e5" @@ -10430,6 +10686,11 @@ requires-port@^1.0.0: resolved 
"https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" integrity sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ== +reselect@^5.1.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/reselect/-/reselect-5.1.1.tgz#c766b1eb5d558291e5e550298adb0becc24bb72e" + integrity sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w== + resize-observer-polyfill@^1.5.1: version "1.5.1" resolved "https://registry.yarnpkg.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz#0e9020dd3d21024458d4ebd27e23e40269810464" @@ -10563,6 +10824,11 @@ safe-regex-test@^1.0.3: es-errors "^1.3.0" is-regex "^1.1.4" +safe-stable-stringify@^2.3.1: + version "2.5.0" + resolved "https://registry.yarnpkg.com/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz#4ca2f8e385f2831c432a719b108a3bf7af42a1dd" + integrity sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA== + "safer-buffer@>= 2.1.2 < 3", "safer-buffer@>= 2.1.2 < 3.0.0": version "2.1.2" resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" @@ -10869,6 +11135,13 @@ sockjs@^0.3.24: uuid "^8.3.2" websocket-driver "^0.7.4" +sonic-boom@^3.7.0: + version "3.8.1" + resolved "https://registry.yarnpkg.com/sonic-boom/-/sonic-boom-3.8.1.tgz#d5ba8c4e26d6176c9a1d14d549d9ff579a163422" + integrity sha512-y4Z8LCDBuum+PBP3lSV7RHrXscqksve/bi0as7mhwVnBW+/wUqKT/2Kb7um8yqcFy0duYbbPxzt89Zy2nOCaxg== + dependencies: + atomic-sleep "^1.0.0" + sort-css-media-queries@2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz#aa33cf4a08e0225059448b6c40eddbf9f1c8334c" @@ -10925,6 +11198,11 @@ spdy@^4.0.2: select-hose "^2.0.0" spdy-transport "^3.0.0" +split2@^4.0.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/split2/-/split2-4.2.0.tgz#c9c5920904d148bab0b9f67145f245a86aadbfa4" + integrity sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg== + sprintf-js@~1.0.2: version "1.0.3" resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" @@ -11031,7 +11309,7 @@ string.prototype.trimstart@^1.0.8: define-properties "^1.2.1" es-object-atoms "^1.0.0" -string_decoder@^1.1.1: +string_decoder@^1.1.1, string_decoder@^1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== @@ -11208,6 +11486,13 @@ text-table@^0.2.0: resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== +thread-stream@^2.6.0: + version "2.7.0" + resolved "https://registry.yarnpkg.com/thread-stream/-/thread-stream-2.7.0.tgz#d8a8e1b3fd538a6cca8ce69dbe5d3d097b601e11" + integrity sha512-qQiRWsU/wvNolI6tbbCKd9iKaTnCXsTwVxhhKM6nctPdujTyztjlbUkUTUymidWcMnZ5pWR0ej4a0tjsW021vw== + dependencies: + real-require "^0.2.0" + thunky@^1.0.2: version "1.1.0" resolved "https://registry.yarnpkg.com/thunky/-/thunky-1.1.0.tgz#5abaf714a9405db0504732bbccd2cedd9ef9537d" @@ -11260,6 +11545,11 @@ trough@^2.0.0: resolved 
"https://registry.yarnpkg.com/trough/-/trough-2.2.0.tgz#94a60bd6bd375c152c1df911a4b11d5b0256f50f" integrity sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw== +ts-debounce@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/ts-debounce/-/ts-debounce-4.0.0.tgz#33440ef64fab53793c3d546a8ca6ae539ec15841" + integrity sha512-+1iDGY6NmOGidq7i7xZGA4cm8DAa6fqdYcvO5Z6yBevH++Bdo9Qt/mN0TzHUgcCcKv1gmh9+W5dHqz8pMWbCbg== + ts-dedent@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/ts-dedent/-/ts-dedent-2.2.0.tgz#39e4bd297cd036292ae2394eb3412be63f563bb5" @@ -11387,6 +11677,13 @@ undici-types@~6.19.8: resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02" integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw== +undici@5.28.4: + version "5.28.4" + resolved "https://registry.yarnpkg.com/undici/-/undici-5.28.4.tgz#6b280408edb6a1a604a9b20340f45b422e373068" + integrity sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g== + dependencies: + "@fastify/busboy" "^2.0.0" + unfetch@^3.1.1: version "3.1.2" resolved "https://registry.yarnpkg.com/unfetch/-/unfetch-3.1.2.tgz#dc271ef77a2800768f7b459673c5604b5101ef77" @@ -11606,7 +11903,7 @@ uuid@^8.3.2: resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== -uuid@^9.0.1: +uuid@^9.0.0, uuid@^9.0.1: version "9.0.1" resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30" integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==