diff --git a/docs/developer/writing-flow-component-documentation.md b/docs/developer/writing-flow-component-documentation.md
index ddaf6466e0..ab1d304990 100644
--- a/docs/developer/writing-flow-component-documentation.md
+++ b/docs/developer/writing-flow-component-documentation.md
@@ -113,13 +113,13 @@
 If documenting a beta component, include the following after the header, but
 before the description of the component:
 
 ```markdown
-{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}}
 ```
 
 If documenting an experimental component, include the following instead:
 
 ```markdown
-{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}}
 ```
 
 ### Usage
diff --git a/docs/sources/_index.md b/docs/sources/_index.md
index 605655ef6a..6cf14905df 100644
--- a/docs/sources/_index.md
+++ b/docs/sources/_index.md
@@ -1,73 +1,53 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/
-- /docs/grafana-cloud/monitor-infrastructure/agent/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/
-- /docs/grafana-cloud/send-data/agent/
-canonical: https://grafana.com/docs/agent/latest/
-title: Grafana Agent
-description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector
+canonical: https://grafana.com/docs/alloy/latest/
+title: Grafana Alloy
+description: Grafana Alloy is a flexible, performant, vendor-neutral telemetry collector
 weight: 350
 cascade:
-  AGENT_RELEASE: v0.40.0
+  ALLOY_RELEASE: $ALLOY_VERSION
   OTEL_VERSION: v0.87.0
+  PRODUCT_NAME: Grafana Alloy
+  PRODUCT_ROOT_NAME: Alloy
 ---
 
-# Grafana Agent
+# {{% param "PRODUCT_NAME" %}}
 
-Grafana Agent is a vendor-neutral, batteries-included telemetry collector with
-configuration inspired by [Terraform][]. It is designed to be flexible,
-performant, and compatible with multiple ecosystems such as Prometheus and
-OpenTelemetry.
+{{< param "PRODUCT_NAME" >}} is a vendor-neutral, batteries-included telemetry collector with configuration inspired by [Terraform][].
+It is designed to be flexible, performant, and compatible with multiple ecosystems such as Prometheus and OpenTelemetry.
 
-Grafana Agent is based around **components**. Components are wired together to
-form programmable observability **pipelines** for telemetry collection,
-processing, and delivery.
+{{< param "PRODUCT_NAME" >}} is based around **components**.
+Components are wired together to form programmable observability **pipelines** for telemetry collection, processing, and delivery.
 
-{{< admonition type="note" >}}
-This page focuses mainly on [Flow mode](https://grafana.com/docs/agent//flow/), the Terraform-inspired variant of Grafana Agent.
-
-For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}).
-{{< /admonition >}}
-
-Grafana Agent can collect, transform, and send data to:
+{{< param "PRODUCT_NAME" >}} can collect, transform, and send data to:
 
 * The [Prometheus][] ecosystem
 * The [OpenTelemetry][] ecosystem
 * The Grafana open source ecosystem ([Loki][], [Grafana][], [Tempo][], [Mimir][], [Pyroscope][])
 
-[Terraform]: https://terraform.io
-[Prometheus]: https://prometheus.io
-[OpenTelemetry]: https://opentelemetry.io
-[Loki]: https://github.com/grafana/loki
-[Grafana]: https://github.com/grafana/grafana
-[Tempo]: https://github.com/grafana/tempo
-[Mimir]: https://github.com/grafana/mimir
-[Pyroscope]: https://github.com/grafana/pyroscope
+## Why use {{< param "PRODUCT_NAME" >}}?
 
-## Why use Grafana Agent?
-
-* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and
-  Grafana open source ecosystems.
-* **Every signal**: Collect telemetry data for metrics, logs, traces, and
-  continuous profiles.
-* **Scalable**: Deploy on any number of machines to collect millions of active
-  series and terabytes of logs.
-* **Battle-tested**: Grafana Agent extends the existing battle-tested code from
-  the Prometheus and OpenTelemetry Collector projects.
-* **Powerful**: Write programmable pipelines with ease, and debug them using a
-  [built-in UI][UI].
-* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and
-  Apache to get telemetry that's immediately useful.
+* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and Grafana open source ecosystems.
+* **Every signal**: Collect telemetry data for metrics, logs, traces, and continuous profiles.
+* **Scalable**: Deploy on any number of machines to collect millions of active series and terabytes of logs.
+* **Battle-tested**: {{< param "PRODUCT_NAME" >}} extends the existing battle-tested code from the Prometheus and OpenTelemetry Collector projects.
+* **Powerful**: Write programmable pipelines with ease, and debug them using a [built-in UI][UI].
+* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and Apache to get telemetry that's immediately useful.
+
+
 ## Supported platforms
 
 * Linux
 
@@ -92,29 +72,19 @@ Grafana Agent can collect, transform, and send data to:
 
 ## Release cadence
 
-A new minor release is planned every six weeks for the entire Grafana Agent
-project, including Static mode, the Static mode Kubernetes operator, and Flow
-mode.
+A new minor release is planned every six weeks for the entire {{< param "PRODUCT_NAME" >}} project.
 
-The release cadence is best-effort: releases may be moved forwards or backwards
-if needed. The planned release dates for future minor releases do not change if
-one minor release is moved.
+The release cadence is best-effort: releases may be moved forwards or backwards if needed.
+The planned release dates for future minor releases do not change if one minor release is moved.
 
 Patch and security releases may be created at any time.
-{{% docs/reference %}}
-[variants]: "/docs/agent/ -> /docs/agent//about"
-[variants]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/about"
-
-[Static mode]: "/docs/agent/ -> /docs/agent//static"
-[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static"
-
-[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator"
-[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/operator"
-
-[Flow mode]: "/docs/agent/ -> /docs/agent//flow"
-[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow"
-
-[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui"
-[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui"
-{{% /docs/reference %}}
+[Terraform]: https://terraform.io
+[Prometheus]: https://prometheus.io
+[OpenTelemetry]: https://opentelemetry.io
+[Loki]: https://github.com/grafana/loki
+[Grafana]: https://github.com/grafana/grafana
+[Tempo]: https://github.com/grafana/tempo
+[Mimir]: https://github.com/grafana/mimir
+[Pyroscope]: https://github.com/grafana/pyroscope
+[UI]: ./tasks/debug/#grafana-alloy-ui
diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t
index daf939a62a..6cf14905df 100644
--- a/docs/sources/_index.md.t
+++ b/docs/sources/_index.md.t
@@ -1,73 +1,53 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/
-- /docs/grafana-cloud/monitor-infrastructure/agent/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/
-- /docs/grafana-cloud/send-data/agent/
-canonical: https://grafana.com/docs/agent/latest/
-title: Grafana Agent
-description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector
+canonical: https://grafana.com/docs/alloy/latest/
+title: Grafana Alloy
+description: Grafana Alloy is a flexible, performant, vendor-neutral telemetry collector
 weight: 350
 cascade:
-  AGENT_RELEASE: $AGENT_VERSION
+  ALLOY_RELEASE: $ALLOY_VERSION
   OTEL_VERSION: v0.87.0
+  PRODUCT_NAME: Grafana Alloy
+  PRODUCT_ROOT_NAME: Alloy
 ---
 
-# Grafana Agent
+# {{% param "PRODUCT_NAME" %}}
 
-Grafana Agent is a vendor-neutral, batteries-included telemetry collector with
-configuration inspired by [Terraform][]. It is designed to be flexible,
-performant, and compatible with multiple ecosystems such as Prometheus and
-OpenTelemetry.
+{{< param "PRODUCT_NAME" >}} is a vendor-neutral, batteries-included telemetry collector with configuration inspired by [Terraform][].
+It is designed to be flexible, performant, and compatible with multiple ecosystems such as Prometheus and OpenTelemetry.
 
-Grafana Agent is based around **components**. Components are wired together to
-form programmable observability **pipelines** for telemetry collection,
-processing, and delivery.
+{{< param "PRODUCT_NAME" >}} is based around **components**.
+Components are wired together to form programmable observability **pipelines** for telemetry collection, processing, and delivery.
 
-{{< admonition type="note" >}}
-This page focuses mainly on [Flow mode](https://grafana.com/docs/agent//flow/), the Terraform-inspired variant of Grafana Agent.
-
-For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}).
-{{< /admonition >}}
-
-Grafana Agent can collect, transform, and send data to:
+{{< param "PRODUCT_NAME" >}} can collect, transform, and send data to:
 
 * The [Prometheus][] ecosystem
 * The [OpenTelemetry][] ecosystem
 * The Grafana open source ecosystem ([Loki][], [Grafana][], [Tempo][], [Mimir][], [Pyroscope][])
 
-[Terraform]: https://terraform.io
-[Prometheus]: https://prometheus.io
-[OpenTelemetry]: https://opentelemetry.io
-[Loki]: https://github.com/grafana/loki
-[Grafana]: https://github.com/grafana/grafana
-[Tempo]: https://github.com/grafana/tempo
-[Mimir]: https://github.com/grafana/mimir
-[Pyroscope]: https://github.com/grafana/pyroscope
+## Why use {{< param "PRODUCT_NAME" >}}?
 
-## Why use Grafana Agent?
-
-* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and
-  Grafana open source ecosystems.
-* **Every signal**: Collect telemetry data for metrics, logs, traces, and
-  continuous profiles.
-* **Scalable**: Deploy on any number of machines to collect millions of active
-  series and terabytes of logs.
-* **Battle-tested**: Grafana Agent extends the existing battle-tested code from
-  the Prometheus and OpenTelemetry Collector projects.
-* **Powerful**: Write programmable pipelines with ease, and debug them using a
-  [built-in UI][UI].
-* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and
-  Apache to get telemetry that's immediately useful.
+* **Vendor-neutral**: Fully compatible with the Prometheus, OpenTelemetry, and Grafana open source ecosystems.
+* **Every signal**: Collect telemetry data for metrics, logs, traces, and continuous profiles.
+* **Scalable**: Deploy on any number of machines to collect millions of active series and terabytes of logs.
+* **Battle-tested**: {{< param "PRODUCT_NAME" >}} extends the existing battle-tested code from the Prometheus and OpenTelemetry Collector projects.
+* **Powerful**: Write programmable pipelines with ease, and debug them using a [built-in UI][UI].
+* **Batteries included**: Integrate with systems like MySQL, Kubernetes, and Apache to get telemetry that's immediately useful.
+
+
 ## Supported platforms
 
 * Linux
 
@@ -92,29 +72,19 @@ Grafana Agent can collect, transform, and send data to:
 
 ## Release cadence
 
-A new minor release is planned every six weeks for the entire Grafana Agent
-project, including Static mode, the Static mode Kubernetes operator, and Flow
-mode.
+A new minor release is planned every six weeks for the entire {{< param "PRODUCT_NAME" >}} project.
 
-The release cadence is best-effort: releases may be moved forwards or backwards
-if needed. The planned release dates for future minor releases do not change if
-one minor release is moved.
+The release cadence is best-effort: releases may be moved forwards or backwards if needed.
+The planned release dates for future minor releases do not change if one minor release is moved.
 
 Patch and security releases may be created at any time.
-{{% docs/reference %}}
-[variants]: "/docs/agent/ -> /docs/agent//about"
-[variants]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/about"
-
-[Static mode]: "/docs/agent/ -> /docs/agent//static"
-[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static"
-
-[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator"
-[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/operator"
-
-[Flow mode]: "/docs/agent/ -> /docs/agent//flow"
-[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow"
-
-[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui"
-[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui"
-{{% /docs/reference %}}
+[Terraform]: https://terraform.io
+[Prometheus]: https://prometheus.io
+[OpenTelemetry]: https://opentelemetry.io
+[Loki]: https://github.com/grafana/loki
+[Grafana]: https://github.com/grafana/grafana
+[Tempo]: https://github.com/grafana/tempo
+[Mimir]: https://github.com/grafana/mimir
+[Pyroscope]: https://github.com/grafana/pyroscope
+[UI]: ./tasks/debug/#grafana-alloy-ui
diff --git a/docs/sources/about.md b/docs/sources/about.md
index eca262408d..a2df0de45c 100644
--- a/docs/sources/about.md
+++ b/docs/sources/about.md
@@ -1,53 +1,66 @@
 ---
-aliases:
-- ./about-agent/
-- /docs/grafana-cloud/agent/about/
-- /docs/grafana-cloud/monitor-infrastructure/agent/about/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/about/
-- /docs/grafana-cloud/send-data/agent/about/
-canonical: https://grafana.com/docs/agent/latest/about/
-description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector
+canonical: https://grafana.com/docs/alloy/latest/about/
+description: Grafana Alloy is a flexible, performant, vendor-neutral telemetry collector
 menuTitle: Introduction
-title: Introduction to Grafana Agent
-weight: 100
+title: Introduction to Grafana Alloy
+weight: 10
 ---
 
-# Introduction to Grafana Agent
-
-Grafana Agent is a flexible, high performance, vendor-neutral telemetry collector. It's fully compatible with the most popular open source observability standards such as OpenTelemetry (OTel) and Prometheus.
-
-Grafana Agent is available in three different variants:
-
-- [Static mode][]: The original Grafana Agent.
-- [Static mode Kubernetes operator][]: The Kubernetes operator for Static mode.
-- [Flow mode][]: The new, component-based Grafana Agent.
-
-{{% docs/reference %}}
-[Static mode]: "/docs/agent/ -> /docs/agent//static"
-[Static mode]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static"
-[Static mode Kubernetes operator]: "/docs/agent/ -> /docs/agent//operator"
-[Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/operator"
-[Flow mode]: "/docs/agent/ -> /docs/agent//flow"
-[Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow"
-[Prometheus]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md"
-[Prometheus]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md"
-[OTel]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md"
-[OTel]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md"
-[Loki]: "/docs/agent/ -> /docs/agent//flow/tasks/migrate/from-promtail.md"
-[Loki]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/migrate/from-promtail.md"
-[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering/_index.md"
-[clustering]: "/docs/grafana-cloud/ -> /docs/agent//flow/concepts/clustering/_index.md"
-[rules]: "/docs/agent/ -> /docs/agent/latest/flow/reference/components/mimir.rules.kubernetes.md"
-[rules]: "/docs/grafana-cloud/ -> /docs/agent/latest/flow/reference/components/mimir.rules.kubernetes.md"
-[vault]: "/docs/agent/ -> /docs/agent//flow/reference/components/remote.vault.md"
-[vault]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/components/remote.vault.md"
-{{% /docs/reference %}}
+# Introduction to {{% param "PRODUCT_NAME" %}}
 
-[Pyroscope]: https://grafana.com/docs/pyroscope/latest/configure-client/grafana-agent/go_pull
-[helm chart]: https://grafana.com/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/config-k8s-helmchart
-[sla]: https://grafana.com/legal/grafana-cloud-sla
-[observability]: https://grafana.com/docs/grafana-cloud/monitor-applications/application-observability/setup#send-telemetry
+{{< param "PRODUCT_NAME" >}} is a flexible, high-performance, vendor-neutral telemetry collector.
+It's fully compatible with the most popular open source observability standards such as OpenTelemetry (OTel) and Prometheus.
+
+{{< param "PRODUCT_NAME" >}} is a _component-based_ revision of {{< param "PRODUCT_ROOT_NAME" >}} with a focus on ease-of-use, debuggability, and the ability to adapt to the needs of power users.
+
+Components allow for reusability, composability, and focus on a single task.
+
+* **Reusability** allows for the output of components to be reused as the input for multiple other components.
+* **Composability** allows for components to be chained together to form a pipeline.
+* **Single task** means the scope of a component is limited to one narrow task and thus has fewer side effects.
+
+## Features
+
+* Write declarative configurations with a Terraform-inspired configuration language.
+* Declare components to configure parts of a pipeline.
+* Use expressions to bind components together to build a programmable pipeline.
+* Includes a UI for debugging the state of a pipeline.
+
+## Example
+
+```river
+// Discover Kubernetes pods to collect metrics from
+discovery.kubernetes "pods" {
+  role = "pod"
+}
+
+// Scrape metrics from Kubernetes pods and send to a prometheus.remote_write
+// component.
+prometheus.scrape "default" {
+  targets    = discovery.kubernetes.pods.targets
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+
+// Get an API key from disk.
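+// The file's contents are exported as `content`; is_secret marks them as
+// sensitive so the UI masks the exported value.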
+local.file "apikey" {
+  filename  = "/var/data/my-api-key.txt"
+  is_secret = true
+}
+
+// Collect and send metrics to a Prometheus remote_write endpoint.
+prometheus.remote_write "default" {
+  endpoint {
+    url = "http://localhost:9009/api/prom/push"
+
+    basic_auth {
+      username = "MY_USERNAME"
+      password = local.file.apikey.content
+    }
+  }
+}
+```
+
-## Choose which variant of Grafana Agent to run
+## {{% param "PRODUCT_NAME" %}} configuration generator
+
+The {{< param "PRODUCT_NAME" >}} [configuration generator][] helps you get a head start on creating {{< param "PRODUCT_NAME" >}} configurations.
+
+{{< admonition type="note" >}}
+This feature is experimental, and it doesn't support all River components.
+{{< /admonition >}}
+
+## Next steps
+
+* [Install][] {{< param "PRODUCT_NAME" >}}.
+* Learn about the core [Concepts][] of {{< param "PRODUCT_NAME" >}}.
+* Follow the [Tutorials][] for hands-on learning of {{< param "PRODUCT_NAME" >}}.
+* Consult the [Tasks][] instructions to accomplish common objectives with {{< param "PRODUCT_NAME" >}}.
+* Check out the [Reference][] documentation to find specific information you might be looking for.
+
+[configuration generator]: https://grafana.github.io/agent-configurator/
+[Install]: ../get-started/install/
+[Concepts]: ../concepts/
+[Tasks]: ../tasks/
+[Tutorials]: ../tutorials/
+[Reference]: ../reference/
+
+
 ### BoringCrypto
 
-[BoringCrypto](https://pkg.go.dev/crypto/internal/boring) is an **EXPERIMENTAL** feature for building Grafana Agent
+[BoringCrypto][] is an **EXPERIMENTAL** feature for building {{< param "PRODUCT_NAME" >}}
 binaries and images with BoringCrypto enabled. Builds and Docker images for Linux arm64/amd64 are made available.
 
-{{% docs/reference %}}
-[integrations]: "/docs/agent/ -> /docs/agent//static/configuration/integrations"
-[integrations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/integrations"
-
-[components]: "/docs/agent/ -> /docs/agent//flow/reference/components"
-[components]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/components"
-{{% /docs/reference %}}
+[BoringCrypto]: https://pkg.go.dev/crypto/internal/boring
diff --git a/docs/sources/concepts/_index.md b/docs/sources/concepts/_index.md
new file mode 100644
index 0000000000..02cc0534d1
--- /dev/null
+++ b/docs/sources/concepts/_index.md
@@ -0,0 +1,12 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/concepts/
+description: Learn about the Grafana Alloy concepts
+title: Concepts
+weight: 100
+---
+
+# Concepts
+
+This section explains the primary concepts of {{< param "PRODUCT_NAME" >}}.
+
+{{< section >}}
diff --git a/docs/sources/concepts/clustering.md b/docs/sources/concepts/clustering.md
new file mode 100644
index 0000000000..1d930287f9
--- /dev/null
+++ b/docs/sources/concepts/clustering.md
@@ -0,0 +1,69 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/concepts/clustering/
+description: Learn about Grafana Alloy clustering concepts
+labels:
+  stage: beta
+menuTitle: Clustering
+title: Clustering (beta)
+weight: 500
+---
+
+# Clustering (beta)
+
+Clustering enables a fleet of {{< param "PRODUCT_ROOT_NAME" >}}s to work together for workload distribution and high availability.
+It helps create horizontally scalable deployments with minimal resource and operational overhead.
+
+To achieve this, {{< param "PRODUCT_NAME" >}} makes use of an eventually consistent model that assumes all participating {{< param "PRODUCT_ROOT_NAME" >}}s are interchangeable and converge on using the same configuration file.
+
+The behavior of a standalone, non-clustered {{< param "PRODUCT_ROOT_NAME" >}} is the same as if it were a single-node cluster.
+
+You configure clustering by passing `cluster` command-line flags to the [run][] command.
+
+## Use cases
+
+### Target auto-distribution
+
+Target auto-distribution is the most basic use case of clustering.
+It allows scraping components running on all peers to distribute the scrape load between themselves.
+Target auto-distribution requires that all {{< param "PRODUCT_ROOT_NAME" >}}s in the same cluster can reach the same service discovery APIs and scrape the same targets.
+
+You must explicitly enable target auto-distribution on components by defining a `clustering` block.
+
+```river
+prometheus.scrape "default" {
+  clustering {
+    enabled = true
+  }
+
+  ...
+}
+```
+
+A cluster state change is detected when a new node joins or an existing node leaves.
+All participating components locally recalculate target ownership and re-balance the number of targets they're scraping without explicitly communicating ownership over the network.
+
+Target auto-distribution allows you to dynamically scale the number of {{< param "PRODUCT_ROOT_NAME" >}}s to distribute workload during peaks.
+It also provides resiliency because targets are automatically picked up by one of the remaining peers if a node leaves.
+
+{{< param "PRODUCT_NAME" >}} uses a local consistent hashing algorithm to distribute targets, meaning that, on average, only ~1/N of the targets are redistributed.
+For example, in a ten-node cluster, roughly a tenth of the targets move when a node joins or leaves.
+
+Refer to each component's reference documentation to discover whether it supports clustering, for example:
+
+- [prometheus.scrape][]
+- [pyroscope.scrape][]
+- [prometheus.operator.podmonitors][]
+- [prometheus.operator.servicemonitors][]
+
+## Cluster monitoring and troubleshooting
+
+You can use the {{< param "PRODUCT_NAME" >}} UI [clustering page][] to monitor your cluster status.
+Refer to [Debugging clustering issues][debugging] for additional troubleshooting information.
+ + +[run]: ../../reference/cli/run/#clustering-beta +[prometheus.scrape]: ../../reference/components/prometheus.scrape/#clustering-beta +[pyroscope.scrape]: ../../reference/components/pyroscope.scrape/#clustering-beta +[prometheus.operator.podmonitors]: ../../reference/components/prometheus.operator.podmonitors/#clustering-beta +[prometheus.operator.servicemonitors]: ../../reference/components/prometheus.operator.servicemonitors/#clustering-beta +[clustering page]: ../../tasks/debug/#clustering-page +[debugging]: ../../tasks/debug/#debugging-clustering-issues diff --git a/docs/sources/flow/concepts/component_controller.md b/docs/sources/concepts/component_controller.md similarity index 81% rename from docs/sources/flow/concepts/component_controller.md rename to docs/sources/concepts/component_controller.md index 1a19e13b49..b1474bbe23 100644 --- a/docs/sources/flow/concepts/component_controller.md +++ b/docs/sources/concepts/component_controller.md @@ -1,11 +1,5 @@ --- -aliases: -- ../../concepts/component-controller/ -- /docs/grafana-cloud/agent/flow/concepts/component_controller/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/component_controller/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/component_controller/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/component_controller/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/component_controller/ +canonical: https://grafana.com/docs/alloy/latest/concepts/component_controller/ description: Learn about the component controller title: Component controller weight: 200 @@ -114,12 +108,6 @@ removing components no longer defined in the configuration file and creating new All components managed by the controller are reevaluated after reloading. 
[DAG]: https://en.wikipedia.org/wiki/Directed_acyclic_graph - -{{% docs/reference %}} -[prometheus.exporter.unix]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.exporter.unix.md" -[prometheus.exporter.unix]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.unix.md" -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" -{{% /docs/reference %}} +[prometheus.exporter.unix]: ../../reference/components/prometheus.exporter.unix +[run]: ../../reference/cli/run/ +[Components]: ../components/ diff --git a/docs/sources/flow/concepts/components.md b/docs/sources/concepts/components.md similarity index 91% rename from docs/sources/flow/concepts/components.md rename to docs/sources/concepts/components.md index 1f93d76811..88b9ce223d 100644 --- a/docs/sources/flow/concepts/components.md +++ b/docs/sources/concepts/components.md @@ -1,11 +1,5 @@ --- -aliases: -- ../../concepts/components/ -- /docs/grafana-cloud/agent/flow/concepts/components/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/components/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/components/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/components/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/components/ +canonical: https://grafana.com/docs/alloy/latest/concepts/components/ description: Learn about components title: Components weight: 100 diff --git a/docs/sources/flow/concepts/config-language/_index.md b/docs/sources/concepts/config-language/_index.md similarity index 77% rename from docs/sources/flow/concepts/config-language/_index.md rename to docs/sources/concepts/config-language/_index.md index 80699732f3..799b4586fc 100644 --- a/docs/sources/flow/concepts/config-language/_index.md +++ b/docs/sources/concepts/config-language/_index.md @@ -1,22 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/concepts/config-language/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/ -- configuration-language/ # /docs/agent/latest/flow/concepts/configuration-language/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/config-language/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/ -- ../configuration-language/ # /docs/agent/latest/flow/configuration-language/ -- ../concepts/configuration_language/ # /docs/agent/latest/flow/concepts/configuration_language/ -- /docs/grafana-cloud/agent/flow/concepts/configuration_language/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/configuration_language/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/configuration_language/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/configuration_language/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/ +canonical: 
https://grafana.com/docs/alloy/latest/concepts/config-language/ description: Learn about the configuration language title: Configuration language weight: 10 @@ -140,8 +123,4 @@ You can also start developing your own tooling using the {{< param "PRODUCT_ROOT [VSCode]: https://github.com/rfratto/vscode-river [river-mode]: https://github.com/jdbaldry/river-mode [tree-sitter grammar]: https://github.com/grafana/tree-sitter-river - -{{% docs/reference %}} -[fmt]: "/docs/agent/ -> /docs/agent//flow/reference/cli/fmt" -[fmt]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/fmt" -{{% /docs/reference %}} \ No newline at end of file +[fmt]: ../../reference/cli/fmt/ diff --git a/docs/sources/flow/concepts/config-language/components.md b/docs/sources/concepts/config-language/components.md similarity index 67% rename from docs/sources/flow/concepts/config-language/components.md rename to docs/sources/concepts/config-language/components.md index 967d2437da..bb2e609031 100644 --- a/docs/sources/flow/concepts/config-language/components.md +++ b/docs/sources/concepts/config-language/components.md @@ -1,17 +1,5 @@ --- -aliases: -- ../configuration-language/components/ # /docs/agent/latest/flow/concepts/configuration-language/components/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/components/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/components/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/components/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/components/ -# Previous page aliases for backwards compatibility: -- ../../configuration-language/components/ # /docs/agent/latest/flow/configuration-language/components/ -- /docs/grafana-cloud/agent/flow/config-language/components/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/components/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/components/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/components/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/components/ +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/components/ description: Learn about the components configuration language title: Components configuration language weight: 300 @@ -94,11 +82,6 @@ The documentation of each [component][components] provides more information abou In the previous example, the contents of the `local.file.targets.content` expression is evaluated to a concrete value. The value is type-checked and substituted into `prometheus.scrape.default`, where you can configure it. 
-{{% docs/reference %}} -[components]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components" -[controller]: "/docs/agent/ -> /docs/agent//flow/concepts/component_controller" -[controller]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/component_controller" -[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values" -[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values" -{{% /docs/reference %}} \ No newline at end of file +[components]: ../../../reference/components/ +[controller]: ../../component_controller/ +[type]: ../expressions/types_and_values/ diff --git a/docs/sources/concepts/config-language/expressions/_index.md b/docs/sources/concepts/config-language/expressions/_index.md new file mode 100644 index 0000000000..f91c8aaa2f --- /dev/null +++ b/docs/sources/concepts/config-language/expressions/_index.md @@ -0,0 +1,21 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/expressions/ +description: Learn about expressions +title: Expressions +weight: 400 +--- + +# Expressions + +Expressions represent or compute values you can assign to attributes within a configuration. + +Basic expressions are literal values, like `"Hello, world!"` or `true`. +Expressions may also do things like [refer to values][] exported by components, perform arithmetic, or [call functions][]. + +You use expressions when you configure any component. +All component arguments have an underlying [type][]. +River checks the expression type before assigning the result to an attribute. + +[refer to values]: ./referencing_exports/ +[call functions]: ./function_calls/ +[type]: ./types_and_values/ diff --git a/docs/sources/concepts/config-language/expressions/function_calls.md b/docs/sources/concepts/config-language/expressions/function_calls.md new file mode 100644 index 0000000000..da05a2c8f4 --- /dev/null +++ b/docs/sources/concepts/config-language/expressions/function_calls.md @@ -0,0 +1,28 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/expressions/function_calls/ +description: Learn about function calls +title: Function calls +weight: 400 +--- + +# Function calls + +You can use River function calls to build richer expressions. + +Functions take zero or more arguments as their input and always return a single value as their output. +You can't construct functions. You can call functions from River's standard library or export them from a component. + +If a function fails, the expression isn't evaluated, and an error is reported. + +## Standard library functions + +River contains a [standard library][] of functions. +Some functions enable interaction with the host system, for example, reading from an environment variable. +Some functions allow for more complex expressions, for example, concatenating arrays or decoding JSON strings into objects. 
+ +```river +env("HOME") +json_decode(local.file.cfg.content)["namespace"] +``` + +[standard library]:../../../../reference/stdlib/ diff --git a/docs/sources/flow/concepts/config-language/expressions/operators.md b/docs/sources/concepts/config-language/expressions/operators.md similarity index 77% rename from docs/sources/flow/concepts/config-language/expressions/operators.md rename to docs/sources/concepts/config-language/expressions/operators.md index 19bb003f74..a554345f04 100644 --- a/docs/sources/flow/concepts/config-language/expressions/operators.md +++ b/docs/sources/concepts/config-language/expressions/operators.md @@ -1,17 +1,5 @@ --- -aliases: -- ../../configuration-language/expressions/operators/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/operators/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/operators/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/operators/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/operators/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/operators/ -# Previous page aliases for backwards compatibility: -- ../../../configuration-language/expressions/operators/ # /docs/agent/latest/flow/configuration-language/expressions/operators/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/operators/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/operators/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/operators/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/operators/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/operators/ +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/expressions/operators/ description: Learn about operators title: Operators weight: 300 diff --git a/docs/sources/flow/concepts/config-language/expressions/referencing_exports.md b/docs/sources/concepts/config-language/expressions/referencing_exports.md similarity index 51% rename from docs/sources/flow/concepts/config-language/expressions/referencing_exports.md rename to docs/sources/concepts/config-language/expressions/referencing_exports.md index 2cc7a8ca5b..00f7030f80 100644 --- a/docs/sources/flow/concepts/config-language/expressions/referencing_exports.md +++ b/docs/sources/concepts/config-language/expressions/referencing_exports.md @@ -1,17 +1,5 @@ --- -aliases: -- ../../configuration-language/expressions/referencing-exports/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/referencing-exports/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/referencing_exports/ -# Previous page aliases for backwards compatibility: -- ../../../configuration-language/expressions/referencing-exports/ # /docs/agent/latest/flow/configuration-language/expressions/referencing-exports/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/referencing_exports/ -- 
/docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/referencing_exports/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/referencing_exports/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/referencing_exports/ +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/expressions/referencing_exports/ description: Learn about referencing component exports title: Referencing component exports weight: 200 @@ -20,8 +8,7 @@ weight: 200 # Referencing component exports Referencing exports enables River to configure and connect components dynamically using expressions. -While components can work in isolation, they're more useful when one component's behavior and data flow are bound to the exports of another, -building a dependency relationship between the two. +While components can work in isolation, they're more useful when one component's behavior and data flow are bound to the exports of another, building a dependency relationship between the two. Such references can only appear as part of another component's arguments or a configuration block's fields. Components can't reference themselves. @@ -60,7 +47,4 @@ After the value is resolved, it must match the [type][] of the attribute it is a While you can only configure attributes using the basic River types, the exports of components can take on special internal River types, such as Secrets or Capsules, which expose different functionality. -{{% docs/reference %}} -[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values" -[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values" -{{% /docs/reference %}} \ No newline at end of file +[type]: ../types_and_values/ diff --git a/docs/sources/flow/concepts/config-language/expressions/types_and_values.md b/docs/sources/concepts/config-language/expressions/types_and_values.md similarity index 82% rename from docs/sources/flow/concepts/config-language/expressions/types_and_values.md rename to docs/sources/concepts/config-language/expressions/types_and_values.md index 70afaf7904..88c5046084 100644 --- a/docs/sources/flow/concepts/config-language/expressions/types_and_values.md +++ b/docs/sources/concepts/config-language/expressions/types_and_values.md @@ -1,17 +1,5 @@ --- -aliases: -- ../../configuration-language/expressions/types-and-values/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/types-and-values/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values/ -# Previous page aliases for backwards compatibility: -- ../../../configuration-language/expressions/types-and-values/ # /docs/agent/latest/flow/configuration-language/expressions/types-and-values/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/types_and_values/ -- 
/docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/types_and_values/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/types_and_values/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/types_and_values/ +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/expressions/types_and_values/ description: Learn about the River types and values title: Types and values weight: 100 @@ -173,8 +161,8 @@ Don't confuse objects with blocks. * An _object_ is a value assigned to an [Attribute][]. You **must** use commas between key-value pairs on separate lines. * A [Block][] is a named structural element composed of multiple attributes. You **must not** use commas between attributes. -[Attribute]: {{< relref "../syntax.md#Attributes" >}} -[Block]: {{< relref "../syntax.md#Blocks" >}} +[Attribute]: ../../syntax/#attributes +[Block]: ../../syntax/#blocks {{< /admonition >}} ## Functions @@ -218,7 +206,4 @@ prometheus.scrape "default" { } ``` -{{% docs/reference %}} -[type]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components" -{{% /docs/reference %}} \ No newline at end of file +[component reference]: ../../../../reference/components/ diff --git a/docs/sources/concepts/config-language/files.md b/docs/sources/concepts/config-language/files.md new file mode 100644 index 0000000000..ec2bb8689d --- /dev/null +++ b/docs/sources/concepts/config-language/files.md @@ -0,0 +1,14 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/files/ +description: Learn about River files +title: Files +weight: 100 +--- + +# Files + +River files are plain text files with the `.river` file extension. +You can refer to each River file as a "configuration file" or a "River configuration." + +River files must be UTF-8 encoded and can contain Unicode characters. +River files can use Unix-style line endings (LF) and Windows-style line endings (CRLF), but formatters may replace all line endings with Unix-style ones. 
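+
+For example, here is a minimal sketch of a complete River configuration file, assuming a local Prometheus remote_write endpoint (the components and endpoint URL are illustrative):
+
+```river
+// example.river: collect this host's metrics and forward them.
+prometheus.exporter.unix "default" { }
+
+prometheus.scrape "default" {
+  targets    = prometheus.exporter.unix.default.targets
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+
+prometheus.remote_write "default" {
+  endpoint {
+    url = "http://localhost:9009/api/prom/push"
+  }
+}
+```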
diff --git a/docs/sources/flow/concepts/config-language/syntax.md b/docs/sources/concepts/config-language/syntax.md
similarity index 61%
rename from docs/sources/flow/concepts/config-language/syntax.md
rename to docs/sources/concepts/config-language/syntax.md
index 6f55701dab..8cfb860241 100644
--- a/docs/sources/flow/concepts/config-language/syntax.md
+++ b/docs/sources/concepts/config-language/syntax.md
@@ -1,17 +1,5 @@
 ---
-aliases:
-- ../configuration-language/syntax/ # /docs/agent/latest/flow/concepts/configuration-language/syntax/
-- /docs/grafana-cloud/agent/flow/concepts/config-language/syntax/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/syntax/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/syntax/
-- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/syntax/
-# Previous page aliases for backwards compatibility:
-- ../../configuration-language/syntax/ # /docs/agent/latest/flow/configuration-language/syntax/
-- /docs/grafana-cloud/agent/flow/config-language/syntax/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/syntax/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/syntax/
-- /docs/grafana-cloud/send-data/agent/flow/config-language/syntax/
-canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/syntax/
+canonical: https://grafana.com/docs/alloy/latest/concepts/config-language/syntax/
 description: Learn about the River syntax
 title: Syntax
 weight: 200
@@ -50,13 +38,11 @@ log_level = "debug"
 
 The `ATTRIBUTE_NAME` must be a valid River [identifier][].
 
-The `ATTRIBUTE_VALUE` can be either a constant value of a valid River [type][] (for example, a string, boolean, number),
-or an [_expression_][expression] to represent or compute more complex attribute values.
+The `ATTRIBUTE_VALUE` can be either a constant value of a valid River [type][] (for example, a string, boolean, or number), or an [_expression_][expression] to represent or compute more complex attribute values.
 
 ### Blocks
 
-You use _Blocks_ to configure the {{< param "PRODUCT_ROOT_NAME" >}}'s behavior as well as {{< param "PRODUCT_NAME" >}}
-components by grouping any number of attributes or nested blocks using curly braces.
+You use _Blocks_ to configure the {{< param "PRODUCT_ROOT_NAME" >}}'s behavior as well as {{< param "PRODUCT_NAME" >}} components by grouping any number of attributes or nested blocks using curly braces.
 Blocks have a _name_, an optional _label_ and a body that contains any number of arguments and nested unlabeled blocks.
 
 Some blocks can be defined more than once.
@@ -97,8 +83,7 @@ If the `BLOCK_LABEL` must be set, it must be a valid River [identifier][] wrappe
 In these cases, you use the label to disambiguate between multiple top-level blocks of the same name.
 
 The following snippet defines a block named `local.file` with its label set to "token".
-The block's body sets `filename` to the content of the `TOKEN_FILE_PATH` environment variable by using an expression,
-and the `is_secret` attribute is set to the boolean `true`, marking the file content as sensitive.
+The block's body sets `filename` to the content of the `TOKEN_FILE_PATH` environment variable by using an expression, and the `is_secret` attribute is set to the boolean `true`, marking the file content as sensitive.
 
 ```river
 local.file "token" {
@@ -116,10 +101,6 @@ River ignores other newlines and you can enter as many newlines as you want.
 [identifier]: #identifiers
-
-{{% docs/reference %}}
-[expression]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions"
-[expression]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions"
-[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values"
-[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values"
-{{% /docs/reference %}}
\ No newline at end of file
+[expression]: ../expressions/
+[type]: ../expressions/types_and_values/
\ No newline at end of file
diff --git a/docs/sources/flow/concepts/custom_components.md b/docs/sources/concepts/custom_components.md
similarity index 57%
rename from docs/sources/flow/concepts/custom_components.md
rename to docs/sources/concepts/custom_components.md
index 8d7fff13f6..6b70b5a269 100644
--- a/docs/sources/flow/concepts/custom_components.md
+++ b/docs/sources/concepts/custom_components.md
@@ -1,11 +1,5 @@
 ---
-aliases:
-- ../../concepts/custom-components/
-- /docs/grafana-cloud/agent/flow/concepts/custom-components/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/custom-components/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/custom-components/
-- /docs/grafana-cloud/send-data/agent/flow/concepts/custom-components/
-canonical: https://grafana.com/docs/agent/latest/flow/concepts/custom-components/
+canonical: https://grafana.com/docs/alloy/latest/concepts/custom-components/
 description: Learn about custom components
 title: Custom components
 weight: 300
@@ -23,20 +17,17 @@ A custom component is composed of:
 
 ## Creating custom components
 
-You can create a new custom component using [the `declare` configuration block][declare]. 
+You can create a new custom component using [the `declare` configuration block][declare].
 The label of the block determines the name of the custom component.
 
 The following custom configuration blocks can be used inside a `declare` block:
 
-* [argument][]: Create a new named argument, whose current value can be referenced using the expression `argument.NAME.value`. Argument values are determined by the user of a custom component.
+* [argument][]: Create a new named argument, whose current value can be referenced using the expression `argument.NAME.value`.
+  Argument values are determined by the user of a custom component.
 * [export][]: Expose a new named value to custom component users.
 
-Custom components are useful for reusing a common pipeline multiple times. To learn how to share custom components across multiple files, refer to [Modules][].
-
-[declare]: {{< relref "../reference/config-blocks/declare.md" >}}
-[argument]: {{< relref "../reference/config-blocks/argument.md" >}}
-[export]: {{< relref "../reference/config-blocks/export.md" >}}
-[Modules]: {{< relref "./modules.md" >}}
+Custom components are useful for reusing a common pipeline multiple times.
+To learn how to share custom components across multiple files, refer to [Modules][].
## Example @@ -59,3 +50,7 @@ add "example" { // add.example.sum == 32 ``` +[declare]: ../../reference/config-blocks/declare/ +[argument]: ../../reference/config-blocks/argument/ +[export]: ../../reference/config-blocks/export/ +[Modules]: ../modules/ diff --git a/docs/sources/flow/concepts/modules.md b/docs/sources/concepts/modules.md similarity index 80% rename from docs/sources/flow/concepts/modules.md rename to docs/sources/concepts/modules.md index 3302d5fa54..37853be78f 100644 --- a/docs/sources/flow/concepts/modules.md +++ b/docs/sources/concepts/modules.md @@ -1,11 +1,5 @@ --- -aliases: -- ../../concepts/modules/ -- /docs/grafana-cloud/agent/flow/concepts/modules/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/modules/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/modules/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/modules/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/modules/ +canonical: https://grafana.com/docs/alloy/latest/concepts/modules/ description: Learn about modules title: Modules weight: 400 @@ -18,23 +12,15 @@ The module passed as an argument to [the `run` command][run] is called the _main Modules can be [imported](#importing-modules) to enable the reuse of [custom components][] defined by that module. -[custom components]: {{< relref "./custom_components.md" >}} -[run]: {{< relref "../reference/cli/run.md" >}} - ## Importing modules A module can be _imported_, allowing the custom components defined by that module to be used by other modules, called the _importing module_. Modules can be imported from multiple locations using one of the `import` configuration blocks: -* [import.file]: Imports a module from a file on disk. -* [import.git]: Imports a module from a file located in a Git repository. -* [import.http]: Imports a module from the response of an HTTP request. -* [import.string]: Imports a module from a string. - -[import.file]: {{< relref "../reference/config-blocks/import.file.md" >}} -[import.git]: {{< relref "../reference/config-blocks/import.git.md" >}} -[import.http]: {{< relref "../reference/config-blocks/import.http.md" >}} -[import.string]: {{< relref "../reference/config-blocks/import.string.md" >}} +* [import.file][]: Imports a module from a file on disk. +* [import.git][]: Imports a module from a file located in a Git repository. +* [import.http][]: Imports a module from the response of an HTTP request. +* [import.string][]: Imports a module from a string. {{< admonition type="warning" >}} You can't import a module that contains top-level blocks other than `declare` or `import`. @@ -42,7 +28,8 @@ You can't import a module that contains top-level blocks other than `declare` or Modules are imported into a _namespace_ where the top-level custom components of the imported module are exposed to the importing module. The label of the import block specifies the namespace of an import. -For example, if a configuration contains a block called `import.file "my_module"`, then custom components defined by that module are exposed as `my_module.CUSTOM_COMPONENT_NAME`. Imported namespaces must be unique across a given importing module. +For example, if a configuration contains a block called `import.file "my_module"`, then custom components defined by that module are exposed as `my_module.CUSTOM_COMPONENT_NAME`. +Imported namespaces must be unique across a given importing module. 
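+
+For example, here is a minimal sketch of importing a module from disk and instantiating one of its custom components (the file path and the `add` component are illustrative):
+
+```river
+// Expose the custom components defined in add.river under the
+// "my_module" namespace.
+import.file "my_module" {
+  filename = "/etc/config/add.river"
+}
+
+// Instantiate a custom component named "add" that the module declares.
+my_module.add "default" {
+  a = 15
+  b = 17
+}
+```
+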
 If an import namespace matches the name of a built-in component namespace, such as `prometheus`, the built-in namespace is hidden from the importing module, and only components defined in the imported module may be used.
 
@@ -115,10 +102,11 @@ loki.write "default" {
 
 # Classic modules (deprecated)
 
 {{< admonition type="caution" >}}
-Modules were redesigned in v0.40 to simplify concepts. This section outlines the design of the original modules prior to v0.40. Classic modules are scheduled to be removed in the release after v0.40.
+Modules were redesigned in v0.40 to simplify concepts.
+This section outlines the design of the original modules prior to v0.40.
+Classic modules are scheduled to be removed in the release after v0.40.
 {{< /admonition >}}
 
-
 You use _Modules_ to create {{< param "PRODUCT_NAME" >}} configurations that you can load as a component.
 Modules are a great way to parameterize a configuration to create reusable pipelines.
 
@@ -240,8 +228,15 @@ loki.write "default" {
 ```
 
 [Module loader]: #module-loaders
-[argument block]: https://grafana.com/docs/agent//flow/reference/config-blocks/argument
-[export block]: https://grafana.com/docs/agent//flow/reference/config-blocks/export
-[Component controller]: https://grafana.com/docs/agent//flow/concepts/component_controller
-[Components]: https://grafana.com/docs/agent//flow/reference/components
+[argument block]: ../../reference/config-blocks/argument/
+[export block]: ../../reference/config-blocks/export/
+[Component controller]: ../component_controller/
+[Components]: ../../reference/components/
 
 {{< /collapse >}}
+
+[custom components]: ../custom_components/
+[run]: ../../reference/cli/run/
+[import.file]: ../../reference/config-blocks/import.file/
+[import.git]: ../../reference/config-blocks/import.git/
+[import.http]: ../../reference/config-blocks/import.http/
+[import.string]: ../../reference/config-blocks/import.string/
diff --git a/docs/sources/data-collection.md b/docs/sources/data-collection.md
index 80fbd874cd..21d2655b00 100644
--- a/docs/sources/data-collection.md
+++ b/docs/sources/data-collection.md
@@ -1,36 +1,28 @@
 ---
-aliases:
-- ./data-collection/
-- /docs/grafana-cloud/agent/data-collection/
-- /docs/grafana-cloud/monitor-infrastructure/agent/data-collection/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/data-collection/
-- /docs/grafana-cloud/send-data/agent/data-collection/
-canonical: https://grafana.com/docs/agent/latest/data-collection/
-description: Grafana Agent data collection
+canonical: https://grafana.com/docs/alloy/latest/data-collection/
+description: Grafana Alloy data collection
 menuTitle: Data collection
-title: Grafana Agent data collection
-weight: 500
+title: Grafana Alloy data collection
+weight: 900
 ---
 
-# Grafana Agent Data collection
+# {{% param "PRODUCT_NAME" %}} data collection
 
-By default, Grafana Agent sends anonymous but uniquely identifiable usage information from
-your Grafana Agent instance to Grafana Labs. These statistics are sent to `stats.grafana.org`.
+By default, {{< param "PRODUCT_NAME" >}} sends anonymous but uniquely identifiable usage information from your {{< param "PRODUCT_NAME" >}} instance to Grafana Labs.
+These statistics are sent to `stats.grafana.org`.
 
-Statistics help us better understand how Grafana Agent is used. This helps us prioritize features and documentation.
+Statistics help us better understand how {{< param "PRODUCT_NAME" >}} is used.
+This helps us prioritize features and documentation.
The usage information includes the following details:

* A randomly generated, anonymous unique ID (UUID).
* Timestamp of when the UID was first generated.
* Timestamp of when the report was created (by default, every four hours).
-* Version of running Grafana Agent.
-* Operating system Grafana Agent is running on.
-* System architecture Grafana Agent is running on.
-* List of enabled feature flags ([Static] mode only).
-* List of enabled integrations ([Static] mode only).
-* List of enabled [components][] ([Flow] mode only).
-* Method used to deploy Grafana Agent, for example Docker, Helm, RPM, or Operator.
+* Version of running {{< param "PRODUCT_NAME" >}}.
+* Operating system {{< param "PRODUCT_NAME" >}} is running on.
+* System architecture {{< param "PRODUCT_NAME" >}} is running on.
+* List of enabled [components][].
+* Method used to deploy {{< param "PRODUCT_NAME" >}}, for example Docker, Helm, RPM, or Operator.

This list may change over time. All newly reported data is documented in the CHANGELOG.
@@ -38,13 +30,5 @@ This list may change over time. All newly reported data is documented in the CHA

You can use the `-disable-reporting` [command line flag][] to disable the reporting and opt-out of the data collection.

-{{% docs/reference %}}
-[command line flag]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md"
-[command line flag]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/cli/run.md"
-[components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md"
-[components]: "/docs/grafana-cloud/ -> /docs/agent//flow/reference/cli/run.md"
-[Static]: "/docs/agent/ -> /docs/agent//static"
-[Static]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static
-[Flow]: "/docs/agent/ -> /docs/agent//flow"
-[Flow]: "/docs/grafana-cloud/ -> /docs/agent//flow"
-{{% /docs/reference %}}
\ No newline at end of file
+[components]: ../concepts/components/
+[command line flag]: ../reference/cli/run/
diff --git a/docs/sources/flow/_index.md b/docs/sources/flow/_index.md
deleted file mode 100644
index 1840476a07..0000000000
--- a/docs/sources/flow/_index.md
+++ /dev/null
@@ -1,98 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/
-- /docs/grafana-cloud/send-data/agent/flow/
-canonical: https://grafana.com/docs/agent/latest/flow/
-description: Grafana Agent Flow is a component-based revision of Grafana Agent with
-  a focus on ease-of-use, debuggability, and adaptability
-title: Flow mode
-weight: 400
-cascade:
-  PRODUCT_NAME: Grafana Agent Flow
-  PRODUCT_ROOT_NAME: Grafana Agent
----
-
-# {{% param "PRODUCT_NAME" %}}
-
-{{< param "PRODUCT_NAME" >}} is a _component-based_ revision of {{< param "PRODUCT_ROOT_NAME" >}} with a focus on ease-of-use,
-debuggability, and ability to adapt to the needs of power users.
-
-Components allow for reusability, composability, and focus on a single task.
-
-* **Reusability** allows for the output of components to be reused as the input for multiple other components.
-* **Composability** allows for components to be chained together to form a pipeline.
-* **Single task** means the scope of a component is limited to one narrow task and thus has fewer side effects.
-
-## Features
-
-* Write declarative configurations with a Terraform-inspired configuration
-  language.
-* Declare components to configure parts of a pipeline.
-* Use expressions to bind components together to build a programmable pipeline.
-* Includes a UI for debugging the state of a pipeline. - -## Example - -```river -// Discover Kubernetes pods to collect metrics from -discovery.kubernetes "pods" { - role = "pod" -} - -// Scrape metrics from Kubernetes pods and send to a prometheus.remote_write -// component. -prometheus.scrape "default" { - targets = discovery.kubernetes.pods.targets - forward_to = [prometheus.remote_write.default.receiver] -} - -// Get an API key from disk. -local.file "apikey" { - filename = "/var/data/my-api-key.txt" - is_secret = true -} - -// Collect and send metrics to a Prometheus remote_write endpoint. -prometheus.remote_write "default" { - endpoint { - url = "http://localhost:9009/api/prom/push" - - basic_auth { - username = "MY_USERNAME" - password = local.file.apikey.content - } - } -} -``` - - -## {{% param "PRODUCT_NAME" %}} configuration generator - -The {{< param "PRODUCT_NAME" >}} [configuration generator](https://grafana.github.io/agent-configurator/) helps you get a head start on creating flow code. - -{{< admonition type="note" >}} -This feature is experimental, and it doesn't support all River components. -{{< /admonition >}} - -## Next steps - -* [Install][] {{< param "PRODUCT_NAME" >}}. -* Learn about the core [Concepts][] of {{< param "PRODUCT_NAME" >}}. -* Follow the [Tutorials][] for hands-on learning of {{< param "PRODUCT_NAME" >}}. -* Consult the [Tasks][] instructions to accomplish common objectives with {{< param "PRODUCT_NAME" >}}. -* Check out the [Reference][] documentation to find specific information you might be looking for. - -{{% docs/reference %}} -[Install]: "/docs/agent/ -> /docs/agent//flow/get-started/install/" -[Install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/" -[Concepts]: "/docs/agent/ -> /docs/agent//flow/concepts/" -[Concepts]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/" -[Tasks]: "/docs/agent/ -> /docs/agent//flow/tasks/" -[Tasks]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/" -[Tutorials]: "/docs/agent/ -> /docs/agent//flow/tutorials/" -[Tutorials]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tutorials/ -[Reference]: "/docs/agent/ -> /docs/agent//flow/reference/" -[Reference]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/ -{{% /docs/reference %}} diff --git a/docs/sources/flow/concepts/_index.md b/docs/sources/flow/concepts/_index.md deleted file mode 100644 index 786af8e546..0000000000 --- a/docs/sources/flow/concepts/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -aliases: -- ../concepts/ -- /docs/grafana-cloud/agent/flow/concepts/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/ -description: Learn about the Grafana Agent Flow concepts -title: Concepts -weight: 100 ---- - -# Concepts - -This section explains the primary concepts of {{< param "PRODUCT_NAME" >}}. 
- -{{< section >}} diff --git a/docs/sources/flow/concepts/clustering.md b/docs/sources/flow/concepts/clustering.md deleted file mode 100644 index e02a6131d4..0000000000 --- a/docs/sources/flow/concepts/clustering.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/concepts/clustering/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/clustering/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/clustering/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/clustering/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/clustering/ -description: Learn about Grafana Agent clustering concepts -labels: - stage: beta -menuTitle: Clustering -title: Clustering (beta) -weight: 500 ---- - -# Clustering (beta) - -Clustering enables a fleet of {{< param "PRODUCT_ROOT_NAME" >}}s to work together for workload distribution and high availability. -It helps create horizontally scalable deployments with minimal resource and operational overhead. - -To achieve this, {{< param "PRODUCT_NAME" >}} makes use of an eventually consistent model that assumes all participating -{{< param "PRODUCT_ROOT_NAME" >}}s are interchangeable and converge on using the same configuration file. - -The behavior of a standalone, non-clustered {{< param "PRODUCT_ROOT_NAME" >}} is the same as if it were a single-node cluster. - -You configure clustering by passing `cluster` command-line flags to the [run][] command. - -## Use cases - -### Target auto-distribution - -Target auto-distribution is the most basic use case of clustering. -It allows scraping components running on all peers to distribute the scrape load between themselves. -Target auto-distribution requires that all {{< param "PRODUCT_ROOT_NAME" >}} in the same cluster can reach the same service discovery APIs and scrape the same targets. - -You must explicitly enable target auto-distribution on components by defining a `clustering` block. - -```river -prometheus.scrape "default" { - clustering { - enabled = true - } - - ... -} -``` - -A cluster state change is detected when a new node joins or an existing node leaves. -All participating components locally recalculate target ownership and re-balance the number of targets they’re scraping without explicitly communicating ownership over the network. - -Target auto-distribution allows you to dynamically scale the number of {{< param "PRODUCT_ROOT_NAME" >}}s to distribute workload during peaks. -It also provides resiliency because targets are automatically picked up by one of the node peers if a node leaves. - -{{< param "PRODUCT_NAME" >}} uses a local consistent hashing algorithm to distribute targets, meaning that, on average, only ~1/N of the targets are redistributed. - -Refer to component reference documentation to discover whether it supports clustering, such as: - -- [prometheus.scrape][] -- [pyroscope.scrape][] -- [prometheus.operator.podmonitors][] -- [prometheus.operator.servicemonitors][] - -## Cluster monitoring and troubleshooting - -You can use the {{< param "PRODUCT_NAME" >}} UI [clustering page][] to monitor your cluster status. -Refer to [Debugging clustering issues][debugging] for additional troubleshooting information. 
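For example, a sketch of starting a clustered instance with the clustering flags documented for the `run` command (the peer addresses are placeholders):

```shell
grafana-agent-flow run config.river \
  --cluster.enabled=true \
  --cluster.join-addresses=agent-0.example.net:12345,agent-1.example.net:12345
```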
- -{{% docs/reference %}} -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md#clustering-beta" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md#clustering-beta" -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md#clustering-beta" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md#clustering-beta" -[pyroscope.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/pyroscope.scrape.md#clustering-beta" -[pyroscope.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.scrape.md#clustering-beta" -[prometheus.operator.podmonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.podmonitors.md#clustering-beta" -[prometheus.operator.podmonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.podmonitors.md#clustering-beta" -[prometheus.operator.servicemonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.servicemonitors.md#clustering-beta" -[prometheus.operator.servicemonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.servicemonitors.md#clustering-beta" -[clustering page]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#clustering-page" -[clustering page]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#clustering-page" -[debugging]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#debugging-clustering-issues" -[debugging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#debugging-clustering-issues" -{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/config-language/expressions/_index.md b/docs/sources/flow/concepts/config-language/expressions/_index.md deleted file mode 100644 index 56dc4c1ee4..0000000000 --- a/docs/sources/flow/concepts/config-language/expressions/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -aliases: -- ../configuration-language/expressions/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/ -# Previous page aliases for backwards compatibility: -- ../../configuration-language/expressions/ # /docs/agent/latest/flow/configuration-language/expressions/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/ -description: Learn about expressions -title: Expressions -weight: 400 ---- - -# Expressions - -Expressions represent or compute values you can assign to attributes within a configuration. - -Basic expressions are literal values, like `"Hello, world!"` or `true`. 
-Expressions may also do things like [refer to values][] exported by components, perform arithmetic, or [call functions][]. - -You use expressions when you configure any component. -All component arguments have an underlying [type][]. -River checks the expression type before assigning the result to an attribute. - -{{% docs/reference %}} -[refer to values]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/referencing_exports" -[refer to values]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/referencing_exports" -[call functions]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/function_calls" -[call functions]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/function_calls" -[type]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values" -[type]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values" -{{% /docs/reference %}} diff --git a/docs/sources/flow/concepts/config-language/expressions/function_calls.md b/docs/sources/flow/concepts/config-language/expressions/function_calls.md deleted file mode 100644 index b9598fea91..0000000000 --- a/docs/sources/flow/concepts/config-language/expressions/function_calls.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -aliases: -- ../../configuration-language/expressions/function-calls/ # /docs/agent/latest/flow/concepts/configuration-language/expressions/function-calls/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/expressions/function_calls/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/expressions/function_calls/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/expressions/function_calls/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/function_calls/ -# Previous page aliases for backwards compatibility: -- ../../../configuration-language/expressions/function-calls/ # /docs/agent/latest/flow/configuration-language/expressions/function-calls/ -- /docs/grafana-cloud/agent/flow/config-language/expressions/function_calls/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/expressions/function_calls/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/expressions/function_calls/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/expressions/function_calls/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/expressions/function_calls/ -description: Learn about function calls -title: Function calls -weight: 400 ---- - -# Function calls - -You can use River function calls to build richer expressions. - -Functions take zero or more arguments as their input and always return a single value as their output. -You can't construct functions. You can call functions from River's standard library or export them from a component. - -If a function fails, the expression isn't evaluated, and an error is reported. - -## Standard library functions - -River contains a [standard library][] of functions. -Some functions enable interaction with the host system, for example, reading from an environment variable. -Some functions allow for more complex expressions, for example, concatenating arrays or decoding JSON strings into objects. 
- -```river -env("HOME") -json_decode(local.file.cfg.content)["namespace"] -``` - -{{% docs/reference %}} -[standard library]: "/docs/agent/ -> /docs/agent//flow/reference/stdlib" -[standard library]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/stdlib" -{{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/concepts/config-language/files.md b/docs/sources/flow/concepts/config-language/files.md deleted file mode 100644 index bd5565635f..0000000000 --- a/docs/sources/flow/concepts/config-language/files.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -aliases: -- ../configuration-language/files/ # /docs/agent/latest/flow/concepts/configuration-language/files/ -- /docs/grafana-cloud/agent/flow/concepts/config-language/files/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/concepts/config-language/files/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/concepts/config-language/files/ -- /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/files/ -# Previous page aliases for backwards compatibility: -- ../../configuration-language/files/ # /docs/agent/latest/flow/configuration-language/files/ -- /docs/grafana-cloud/agent/flow/config-language/files/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/config-language/files/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/config-language/files/ -- /docs/grafana-cloud/send-data/agent/flow/config-language/files/ -canonical: https://grafana.com/docs/agent/latest/flow/concepts/config-language/files/ -description: Learn about River files -title: Files -weight: 100 ---- - -# Files - -River files are plain text files with the `.river` file extension. -You can refer to each River file as a "configuration file" or a "River configuration." - -River files must be UTF-8 encoded and can contain Unicode characters. -River files can use Unix-style line endings (LF) and Windows-style line endings (CRLF), but formatters may replace all line endings with Unix-style ones. diff --git a/docs/sources/flow/get-started/_index.md b/docs/sources/flow/get-started/_index.md deleted file mode 100644 index 444b64f5af..0000000000 --- a/docs/sources/flow/get-started/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/ -# Previous docs aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/ -- /docs/grafana-cloud/send-data/agent/flow/setup/ -- ./setup/ # /docs/agent/latest/flow/setup/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/ -description: Learn how to install and use Grafana Agent Flow -menuTitle: Get started -title: Get started with Grafana Agent Flow -weight: 50 ---- - -# Get started with {{% param "PRODUCT_NAME" %}} - -This section covers topics that help you get started with {{< param "PRODUCT_NAME" >}}, -including installation, running {{< param "PRODUCT_NAME" >}}, overview of deployment topologies, and more. 
- -{{< section >}} diff --git a/docs/sources/flow/get-started/install/_index.md b/docs/sources/flow/get-started/install/_index.md deleted file mode 100644 index 25b9a5b2f1..0000000000 --- a/docs/sources/flow/get-started/install/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/ -# Previous docs aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/install/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/ -- /docs/sources/flow/install/ -- ../setup/install/ # /docs/agent/latest/flow/setup/install/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/ -description: Learn how to install Grafana Agent Flow -menuTitle: Install -title: Install Grafana Agent Flow -weight: 50 ---- - -# Install {{% param "PRODUCT_NAME" %}} - -You can install {{< param "PRODUCT_NAME" >}} on Docker, Kubernetes, Linux, macOS, or Windows. - -The following architectures are supported: - -- Linux: AMD64, ARM64 -- Windows: AMD64 -- macOS: AMD64 (Intel), ARM64 (Apple Silicon) -- FreeBSD: AMD64 - -{{< admonition type="note" >}} -Installing {{< param "PRODUCT_NAME" >}} on other operating systems is possible, but isn't recommended or supported. -{{< /admonition >}} - -{{< section >}} - -## Data collection - -By default, {{< param "PRODUCT_NAME" >}} sends anonymous usage information to Grafana Labs. Refer to [data collection][] for more information -about what data is collected and how you can opt-out. 
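For example, a minimal sketch of opting out at startup using that flag (the configuration file name is a placeholder):

```shell
grafana-agent-flow run --disable-reporting config.river
```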
- -{{% docs/reference %}} -[data collection]: "/docs/agent/ -> /docs/agent//data-collection.md" -[data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/data-collection.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/install/kubernetes.md b/docs/sources/flow/get-started/install/kubernetes.md deleted file mode 100644 index d045c7b5ce..0000000000 --- a/docs/sources/flow/get-started/install/kubernetes.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/kubernetes/ -# Previous docs aliases for backwards compatibility: -- ../../install/kubernetes/ # /docs/agent/latest/flow/install/kubernetes/ -- /docs/grafana-cloud/agent/flow/setup/install/kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/kubernetes/ -- ../../setup/install/kubernetes/ # /docs/agent/latest/flow/setup/install/kubernetes/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/kubernetes/ -description: Learn how to deploy Grafana Agent Flow on Kubernetes -menuTitle: Kubernetes -title: Deploy Grafana Agent Flow on Kubernetes -weight: 200 ---- - -# Deploy {{% param "PRODUCT_NAME" %}} on Kubernetes - -{{< param "PRODUCT_NAME" >}} can be deployed on Kubernetes by using the Helm chart for {{< param "PRODUCT_ROOT_NAME" >}}. - -## Before you begin - -* Install [Helm][] on your computer. -* Configure a Kubernetes cluster that you can use for {{< param "PRODUCT_NAME" >}}. -* Configure your local Kubernetes context to point to the cluster. - -## Deploy - -{{< admonition type="note" >}} -These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for {{< param "PRODUCT_NAME" >}}. -You can deploy {{< param "PRODUCT_ROOT_NAME" >}} either in static mode or flow mode. The Helm chart deploys {{< param "PRODUCT_NAME" >}} by default. -{{< /admonition >}} - -To deploy {{< param "PRODUCT_ROOT_NAME" >}} on Kubernetes using Helm, run the following commands in a terminal window: - -1. Add the Grafana Helm chart repository: - - ```shell - helm repo add grafana https://grafana.github.io/helm-charts - ``` - -1. Update the Grafana Helm chart repository: - - ```shell - helm repo update - ``` - -1. Install {{< param "PRODUCT_ROOT_NAME" >}}: - - ```shell - helm install grafana/grafana-agent - ``` - - Replace the following: - - - _``_: The name to use for your {{< param "PRODUCT_ROOT_NAME" >}} installation, such as `grafana-agent-flow`. - -For more information on the {{< param "PRODUCT_ROOT_NAME" >}} Helm chart, refer to the Helm chart documentation on [Artifact Hub][]. 
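If you need to customize the deployment, a hedged sketch using standard Helm options (the values file and its contents are yours to define; refer to the chart documentation for supported values):

```shell
helm install grafana-agent-flow grafana/grafana-agent -f values.yaml
```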
- -[Artifact Hub]: https://artifacthub.io/packages/helm/grafana/grafana-agent - -## Next steps - -- [Configure {{< param "PRODUCT_NAME" >}}][Configure] - -[Helm]: https://helm.sh - -{{% docs/reference %}} -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-kubernetes.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-kubernetes.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/install/macos.md b/docs/sources/flow/get-started/install/macos.md deleted file mode 100644 index c16f70e6d9..0000000000 --- a/docs/sources/flow/get-started/install/macos.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/macos/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/macos/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/macos/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/macos/ -# Previous docs aliases for backwards compatibility: -- ../../install/macos/ # /docs/agent/latest/flow/install/macos/ -- /docs/grafana-cloud/agent/flow/setup/install/macos/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/macos/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/macos/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/macos/ -- ../../setup/install/macos/ # /docs/agent/latest/flow/setup/install/macos/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/macos/ -description: Learn how to install Grafana AgentFlow on macOS -menuTitle: macOS -title: Install Grafana Agent Flow on macOS -weight: 400 ---- - -# Install {{% param "PRODUCT_NAME" %}} on macOS - -You can install {{< param "PRODUCT_NAME" >}} on macOS with Homebrew . - -{{< admonition type="note" >}} -The default prefix for Homebrew on Intel is `/usr/local`. The default prefix for Homebrew on Apple Silicon is `/opt/Homebrew`. To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`. -{{< /admonition >}} - -## Before you begin - -* Install [Homebrew][] on your computer. - -## Install - -To install {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in a terminal window. - -1. Add the Grafana Homebrew tap: - - ```shell - brew tap grafana/grafana - ``` - -1. Install {{< param "PRODUCT_NAME" >}}: - - ```shell - brew install grafana-agent-flow - ``` - -## Upgrade - -To upgrade {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in a terminal window. - -1. Upgrade {{< param "PRODUCT_NAME" >}}: - - ```shell - brew upgrade grafana-agent-flow - ``` - -1. 
Restart {{< param "PRODUCT_NAME" >}}: - - ```shell - brew services restart grafana-agent-flow - ``` - -## Uninstall - -To uninstall {{< param "PRODUCT_NAME" >}} on macOS, run the following command in a terminal window: - -```shell -brew uninstall grafana-agent-flow -``` - -## Next steps - -- [Run {{< param "PRODUCT_NAME" >}}][Run] -- [Configure {{< param "PRODUCT_NAME" >}}][Configure] - -[Homebrew]: https://brew.sh - -{{% docs/reference %}} -[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/macos.md" -[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/macos.md" -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/_index.md b/docs/sources/flow/get-started/run/_index.md deleted file mode 100644 index f98f870735..0000000000 --- a/docs/sources/flow/get-started/run/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/run/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/run/ -- /docs/sources/flow/run/ -# Previous pages aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/start-agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/start-agent/ -- /docs/grafana-cloud/send-data/agent/flow/setup/start-agent/ -- ../setup/start-agent/ # /docs/agent/latest/flow/setup/start-agent/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/ -description: Learn how to run Grafana Agent Flow -menuTitle: Run -title: Run Grafana Agent Flow -weight: 50 ---- - -# Run {{% param "PRODUCT_NAME" %}} - -Use the following pages to learn how to start, restart, and stop {{< param "PRODUCT_NAME" >}} after it is installed. -For installation instructions, refer to [Install {{< param "PRODUCT_NAME" >}}][Install]. - -{{< section >}} - -{{% docs/reference %}} -[Install]: "/docs/agent/ -> /docs/agent//flow/get-started/install/" -[Install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/" -{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/macos.md b/docs/sources/flow/get-started/run/macos.md deleted file mode 100644 index 8c7a055dd8..0000000000 --- a/docs/sources/flow/get-started/run/macos.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -aliases: - - /docs/grafana-cloud/agent/flow/get-started/run/macos/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/macos/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/macos/ - - /docs/grafana-cloud/send-data/agent/flow/get-started/run/macos/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/macos/ -description: Learn how to run Grafana Agent Flow on macOS -menuTitle: macOS -title: Run Grafana Agent Flow on macOS -weight: 400 ---- - -# Run {{% param "PRODUCT_NAME" %}} on macOS - -{{< param "PRODUCT_NAME" >}} is [installed][InstallMacOS] as a launchd service on macOS. 
- -## Start {{% param "PRODUCT_NAME" %}} - -To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -brew services start grafana-agent-flow -``` - -{{< param "PRODUCT_NAME" >}} automatically runs when the system starts. - -(Optional) To verify that the service is running, run the following command in a terminal window: - -```shell -brew services info grafana-agent-flow -``` - -## Restart {{% param "PRODUCT_NAME" %}} - -To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -brew services restart grafana-agent-flow -``` - -## Stop {{% param "PRODUCT_NAME" %}} - -To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -brew services stop grafana-agent-flow -``` - -## View {{% param "PRODUCT_NAME" %}} logs on macOS - -By default, logs are written to `$(brew --prefix)/var/log/grafana-agent-flow.log` and -`$(brew --prefix)/var/log/grafana-agent-flow.err.log`. - -If you followed [Configure the {{< param "PRODUCT_NAME" >}} service][ConfigureService] and changed the path where logs are written, -refer to your current copy of the {{< param "PRODUCT_NAME" >}} formula to locate your log files. - -## Next steps - -- [Configure {{< param "PRODUCT_NAME" >}}][ConfigureMacOS] - -{{% docs/reference %}} -[InstallMacOS]: "/docs/agent/ -> /docs/agent//flow/get-started/install/macos.md" -[InstallMacOS]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/install/macos.md" -[ConfigureMacOS]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md" -[ConfigureMacOS]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md" -[ConfigureService]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md#configure-the-grafana-agent-flow-service" -[ConfigureService]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md#configure-the-grafana-agent-flow-service" -{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/windows.md b/docs/sources/flow/get-started/run/windows.md deleted file mode 100644 index 2ee89710b0..0000000000 --- a/docs/sources/flow/get-started/run/windows.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -aliases: - - /docs/grafana-cloud/agent/flow/get-started/run/windows/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/windows/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/windows/ - - /docs/grafana-cloud/send-data/agent/flow/get-started/run/windows/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/windows/ -description: Learn how to run Grafana Agent Flow on Windows -menuTitle: Windows -title: Run Grafana Agent Flow on Windows -weight: 500 ---- - -# Run {{% param "PRODUCT_NAME" %}} on Windows - -{{< param "PRODUCT_NAME" >}} is [installed][InstallWindows] as a Windows Service. The service is configured to automatically run on startup. - -To verify that {{< param "PRODUCT_NAME" >}} is running as a Windows Service: - -1. Open the Windows Services manager (services.msc): - - 1. Right click on the Start Menu and select **Run**. - - 1. Type: `services.msc` and click **OK**. - -1. Scroll down to find the **{{< param "PRODUCT_NAME" >}}** service and verify that the **Status** is **Running**. 
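Alternatively, you can check the service from PowerShell. The display name below is an assumption; confirm it against the name shown in the Services list:

```shell
Get-Service "Grafana Agent Flow"
```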
- -## View {{% param "PRODUCT_NAME" %}} logs - -When running on Windows, {{< param "PRODUCT_NAME" >}} writes its logs to Windows Event -Logs with an event source name of **{{< param "PRODUCT_NAME" >}}**. - -To view the logs, perform the following steps: - -1. Open the Event Viewer: - - 1. Right click on the Start Menu and select **Run**. - - 1. Type `eventvwr` and click **OK**. - -1. In the Event Viewer, click on **Windows Logs > Application**. - -1. Search for events with the source **{{< param "PRODUCT_NAME" >}}**. - -## Next steps - -- [Configure {{< param "PRODUCT_NAME" >}}][Configure] - -{{% docs/reference %}} -[InstallWindows]: "/docs/agent/ -> /docs/agent//flow/get-started/install/windows.md" -[InstallWindows]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/install/windows.md" -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-windows.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-windows.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/reference/_index.md b/docs/sources/flow/reference/_index.md deleted file mode 100644 index 5c4e88aac9..0000000000 --- a/docs/sources/flow/reference/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/ -- /docs/grafana-cloud/send-data/agent/flow/reference/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/ -description: The reference-level documentaiton for Grafana Agent -menuTitle: Reference -title: Grafana Agent Flow Reference -weight: 600 ---- - -# {{% param "PRODUCT_NAME" %}} Reference - -This section provides reference-level documentation for the various parts of {{< param "PRODUCT_NAME" >}}: - -{{< section >}} diff --git a/docs/sources/flow/reference/cli/_index.md b/docs/sources/flow/reference/cli/_index.md deleted file mode 100644 index 43fa4be774..0000000000 --- a/docs/sources/flow/reference/cli/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/cli/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/ -- /docs/grafana-cloud/send-data/agent/flow/reference/cli/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/ -description: Learn about the Grafana Agent command line interface -menuTitle: Command-line interface -title: The Grafana Agent command-line interface -weight: 100 ---- - -# The {{% param "PRODUCT_ROOT_NAME" %}} command-line interface - -When in Flow mode, the `grafana-agent` binary exposes a command-line interface with -subcommands to perform various operations. - -The most common subcommand is [`run`][run] which accepts a configuration file and -starts {{< param "PRODUCT_NAME" >}}. - -Available commands: - -* [`convert`][convert]: Convert a {{< param "PRODUCT_ROOT_NAME" >}} configuration file. -* [`fmt`][fmt]: Format a {{< param "PRODUCT_NAME" >}} configuration file. -* [`run`][run]: Start {{< param "PRODUCT_NAME" >}}, given a configuration file. -* [`tools`][tools]: Read the WAL and provide statistical information. -* `completion`: Generate shell completion for the `grafana-agent-flow` CLI. -* `help`: Print help for supported commands. 
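For example, both of the following invocations start {{< param "PRODUCT_NAME" >}} with a configuration file (the file name is a placeholder):

```shell
AGENT_MODE=flow grafana-agent run config.river
grafana-agent-flow run config.river
```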
- -[run]: {{< relref "./run.md" >}} -[fmt]: {{< relref "./fmt.md" >}} -[convert]: {{< relref "./convert.md" >}} -[tools]: {{< relref "./tools.md" >}} diff --git a/docs/sources/flow/reference/cli/convert.md b/docs/sources/flow/reference/cli/convert.md deleted file mode 100644 index 3b44d662e8..0000000000 --- a/docs/sources/flow/reference/cli/convert.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/cli/convert/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/convert/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/convert/ -- /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/convert/ -description: Learn about the convert command -labels: - stage: beta -menuTitle: convert -title: The convert command -weight: 100 ---- - -# The convert command - -The `convert` command converts a supported configuration format to {{< param "PRODUCT_NAME" >}} River format. - -## Usage - -Usage: - -* `AGENT_MODE=flow grafana-agent convert [ ...] ` -* `grafana-agent-flow convert [ ...] ` - - Replace the following: - - * _``_: One or more flags that define the input and output of the command. - * _``_: The {{< param "PRODUCT_ROOT_NAME" >}} configuration file. - -If the `FILE_NAME` argument isn't provided or if the `FILE_NAME` argument is -equal to `-`, `convert` converts the contents of standard input. Otherwise, -`convert` reads and converts the file from disk specified by the argument. - -There are several different flags available for the `convert` command. You can use the `--output` flag to write the contents of the converted configuration to a specified path. You can use the `--report` flag to generate a diagnostic report. The `--bypass-errors` flag allows you to bypass any [errors] generated during the file conversion. - -The command fails if the source configuration has syntactically incorrect -configuration or can't be converted to {{< param "PRODUCT_NAME" >}} River format. - -The following flags are supported: - -* `--output`, `-o`: The filepath and filename where the output is written. - -* `--report`, `-r`: The filepath and filename where the report is written. - -* `--source-format`, `-f`: Required. The format of the source file. Supported formats: [prometheus], [promtail], [static]. - -* `--bypass-errors`, `-b`: Enable bypassing errors when converting. - -* `--extra-args`, `e`: Extra arguments from the original format used by the converter. - -[prometheus]: #prometheus -[promtail]: #promtail -[static]: #static -[errors]: #errors - -### Defaults - -{{< param "PRODUCT_NAME" >}} defaults are managed as follows: -* If a provided source configuration value matches a {{< param "PRODUCT_NAME" >}} default value, the property is left off the output. -* If a non-provided source configuration value default matches a {{< param "PRODUCT_NAME" >}} default value, the property is left off the output. -* If a non-provided source configuration value default doesn't match a {{< param "PRODUCT_NAME" >}} default value, the default value is included in the output. - -### Errors - -Errors are defined as non-critical issues identified during the conversion -where an output can still be generated. These can be bypassed using the -`--bypass-errors` flag. 
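Putting these flags together, a sketch of a conversion that writes the output and a diagnostic report while bypassing non-critical errors (file names are placeholders):

```shell
grafana-agent-flow convert --source-format=prometheus \
  --bypass-errors \
  --output=config.river \
  --report=report.txt \
  prometheus.yml
```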
- -### Prometheus - -Using the `--source-format=prometheus` will convert the source configuration from -[Prometheus v2.45](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/) -to {{< param "PRODUCT_NAME" >}} configuration. - -This includes Prometheus features such as -[scrape_config](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config), -[relabel_config](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#relabel_config), -[metric_relabel_configs](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#metric_relabel_configs), -[remote_write](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#remote_write), -and many supported *_sd_configs. Unsupported features in a source configuration result -in [errors]. - -Refer to [Migrate from Prometheus to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-prometheus/" >}}) for a detailed migration guide. - -### Promtail - -Using the `--source-format=promtail` will convert the source configuration from -[Promtail v2.8.x](/docs/loki/v2.8.x/clients/promtail/) -to {{< param "PRODUCT_NAME" >}} configuration. - -Nearly all [Promtail features](/docs/loki/v2.8.x/clients/promtail/configuration/) -are supported and can be converted to {{< param "PRODUCT_NAME" >}} configuration. - -If you have unsupported features in a source configuration, you will receive [errors] when you convert to a flow configuration. The converter will -also raise warnings for configuration options that may require your attention. - -Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-promtail/" >}}) for a detailed migration guide. - -### Static - -Using the `--source-format=static` will convert the source configuration from a -[Grafana Agent Static]({{< relref "../../../static" >}}) configuration to a {{< param "PRODUCT_NAME" >}} configuration. - -Include `--extra-args` for passing additional command line flags from the original format. -For example, `--extra-args="-enable-features=integrations-next"` will convert a Grafana Agent Static -[integrations-next]({{< relref "../../../static/configuration/integrations/integrations-next/" >}}) -configuration to a {{< param "PRODUCT_NAME" >}} configuration. You can also -expand environment variables with `--extra-args="-config.expand-env"`. You can combine multiple command line -flags with a space between each flag, for example `--extra-args="-enable-features=integrations-next -config.expand-env"`. - -If you have unsupported features in a Static mode source configuration, you will receive [errors][] when you convert to a Flow mode configuration. The converter will -also raise warnings for configuration options that may require your attention. - -Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-static/" >}}) for a detailed migration guide. 
\ No newline at end of file diff --git a/docs/sources/flow/reference/cli/fmt.md b/docs/sources/flow/reference/cli/fmt.md deleted file mode 100644 index 7a266921d3..0000000000 --- a/docs/sources/flow/reference/cli/fmt.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/cli/fmt/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/fmt/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/fmt/ -- /docs/grafana-cloud/send-data/agent/flow/reference/cli/fmt/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/fmt/ -description: Learn about the fmt command -menuTitle: fmt -title: The fmt command -weight: 200 ---- - -# The fmt command - -The `fmt` command formats a given {{< param "PRODUCT_NAME" >}} configuration file. - -## Usage - -Usage: - -* `AGENT_MODE=flow grafana-agent fmt [FLAG ...] FILE_NAME` -* `grafana-agent-flow fmt [FLAG ...] FILE_NAME` - - Replace the following: - - * `FLAG`: One or more flags that define the input and output of the command. - * `FILE_NAME`: The {{< param "PRODUCT_NAME" >}} configuration file. - -If the `FILE_NAME` argument is not provided or if the `FILE_NAME` argument is -equal to `-`, `fmt` formats the contents of standard input. Otherwise, -`fmt` reads and formats the file from disk specified by the argument. - -The `--write` flag can be specified to replace the contents of the original -file on disk with the formatted results. `--write` can only be provided when -`fmt` is not reading from standard input. - -The command fails if the file being formatted has syntactically incorrect River -configuration, but does not validate whether Flow components are configured -properly. - -The following flags are supported: - -* `--write`, `-w`: Write the formatted file back to disk when not reading from - standard input. diff --git a/docs/sources/flow/reference/components/_index.md b/docs/sources/flow/reference/components/_index.md deleted file mode 100644 index 3eafecb3c1..0000000000 --- a/docs/sources/flow/reference/components/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/ -description: Learn about the components in Grafana Agent Flow -title: Components reference -weight: 300 ---- - -# Components reference - -This section contains reference documentation for all recognized [components][]. 
- -{{< section >}} - -[components]: {{< relref "../../concepts/components.md" >}} diff --git a/docs/sources/flow/reference/components/faro.receiver.md b/docs/sources/flow/reference/components/faro.receiver.md deleted file mode 100644 index 36e37fa5fc..0000000000 --- a/docs/sources/flow/reference/components/faro.receiver.md +++ /dev/null @@ -1,286 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/faro.receiver/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/faro.receiver/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/faro.receiver/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/faro.receiver/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/faro.receiver/ -description: Learn about the faro.receiver -title: faro.receiver ---- - -# faro.receiver - -`faro.receiver` accepts web application telemetry data from the [Grafana Faro Web SDK][faro-sdk] -and forwards it to other components for future processing. - -[faro-sdk]: https://github.com/grafana/faro-web-sdk - -## Usage - -```river -faro.receiver "LABEL" { - output { - logs = [LOKI_RECEIVERS] - traces = [OTELCOL_COMPONENTS] - } -} -``` - -## Arguments - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`extra_log_labels` | `map(string)` | Extra labels to attach to emitted log lines. | `{}` | no - -## Blocks - -The following blocks are supported inside the definition of `faro.receiver`: - -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -server | [server][] | Configures the HTTP server. | no -server > rate_limiting | [rate_limiting][] | Configures rate limiting for the HTTP server. | no -sourcemaps | [sourcemaps][] | Configures sourcemap retrieval. | no -sourcemaps > location | [location][] | Configures on-disk location for sourcemap retrieval. | no -output | [output][] | Configures where to send collected telemetry data. | yes - -[server]: #server-block -[rate_limiting]: #rate_limiting-block -[sourcemaps]: #sourcemaps-block -[location]: #location-block -[output]: #output-block - -### server block - -The `server` block configures the HTTP server managed by the `faro.receiver` -component. Clients using the [Grafana Faro Web SDK][faro-sdk] forward telemetry -data to this HTTP server for processing. - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`listen_address` | `string` | Address to listen for HTTP traffic on. | `127.0.0.1` | no -`listen_port` | `number` | Port to listen for HTTP traffic on. | `12347` | no -`cors_allowed_origins` | `list(string)` | Origins for which cross-origin requests are permitted. | `[]` | no -`api_key` | `secret` | Optional API key to validate client requests with. | `""` | no -`max_allowed_payload_size` | `string` | Maximum size (in bytes) for client requests. | `"5MiB"` | no - -By default, telemetry data is only accepted from applications on the same local -network as the browser. To accept telemetry data from a wider set of clients, -modify the `listen_address` attribute to the IP address of the appropriate -network interface to use. - -The `cors_allowed_origins` argument determines what origins browser requests -may come from. The default value, `[]`, disables CORS support. To support -requests from all origins, set `cors_allowed_origins` to `["*"]`. The `*` -character indicates a wildcard. 
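For example, a minimal sketch of a server block that listens on all interfaces and allows a single origin (the address and origin are placeholders, and the `loki.write.default` component is assumed to be defined elsewhere in the configuration):

```river
faro.receiver "default" {
  server {
    listen_address       = "0.0.0.0"
    cors_allowed_origins = ["https://app.example.com"]
  }

  output {
    logs = [loki.write.default.receiver]
  }
}
```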
- -When the `api_key` argument is non-empty, client requests must have an HTTP -header called `X-API-Key` matching the value of the `api_key` argument. -Requests that are missing the header or have the wrong value are rejected with -an `HTTP 401 Unauthorized` status code. If the `api_key` argument is empty, no -authentication checks are performed, and the `X-API-Key` HTTP header is -ignored. - -### rate_limiting block - -The `rate_limiting` block configures rate limiting for client requests. - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`enabled` | `bool` | Whether to enable rate limiting. | `true` | no -`rate` | `number` | Rate of allowed requests per second. | `50` | no -`burst_size` | `number` | Allowed burst size of requests. | `100` | no - -Rate limiting functions as a [token bucket algorithm][token-bucket], where -a bucket has a maximum capacity for up to `burst_size` requests and refills at a -rate of `rate` per second. - -Each HTTP request drains the capacity of the bucket by one. Once the bucket is -empty, HTTP requests are rejected with an `HTTP 429 Too Many Requests` status -code until the bucket has more available capacity. - -Configuring the `rate` argument determines how fast the bucket refills, and -configuring the `burst_size` argument determines how many requests can be -received in a burst before the bucket is empty and starts rejecting requests. - -[token-bucket]: https://en.wikipedia.org/wiki/Token_bucket - -### sourcemaps block - -The `sourcemaps` block configures how to retrieve sourcemaps. Sourcemaps are -then used to transform file and line information from minified code into the -file and line information from the original source code. - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`download` | `bool` | Whether to download sourcemaps. | `true` | no -`download_from_origins` | `list(string)` | Which origins to download sourcemaps from. | `["*"]` | no -`download_timeout` | `duration` | Timeout when downloading sourcemaps. | `"1s"` | no - -When exceptions are sent to the `faro.receiver` component, it can download -sourcemaps from the web application. You can disable this behavior by setting -the `download` argument to `false`. - -The `download_from_origins` argument determines which origins a sourcemap may -be downloaded from. The origin is attached to the URL that a browser is sending -telemetry data from. The default value, `["*"]`, enables downloading sourcemaps -from all origins. The `*` character indicates a wildcard. - -By default, sourcemap downloads are subject to a timeout of `"1s"`, specified -by the `download_timeout` argument. Setting `download_timeout` to `"0s"` -disables timeouts. - -To retrieve sourcemaps from disk instead of the network, specify one or more -[`location` blocks][location]. When `location` blocks are provided, they are -checked first for sourcemaps before falling back to downloading. - -### location block - -The `location` block declares a location where sourcemaps are stored on the -filesystem. The `location` block can be specified multiple times to declare -multiple locations where sourcemaps are stored. - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`path` | `string` | The path on disk where sourcemaps are stored. | | yes -`minified_path_prefix` | `string` | The prefix of the minified path sent from browsers. 
| | yes - -The `minified_path_prefix` argument determines the prefix of paths to -Javascript files, such as `http://example.com/`. The `path` argument then -determines where to find the sourcemap for the file. - -For example, given the following location block: - -``` -location { - path = "/var/my-app/build" - minified_path_prefix = "http://example.com/" -} -``` - -To look up the sourcemaps for a file hosted at `http://example.com/foo.js`, the -`faro.receiver` component will: - -1. Remove the minified path prefix to extract the path to the file (`foo.js`). -2. Search for that file path with a `.map` extension (`foo.js.map`) in `path` - (`/var/my-app/build/foo.js.map`). - -Optionally, the value for the `path` argument may contain `{{ .Release }}` as a -template value, such as `/var/my-app/{{ .Release }}/build`. The template value -will be replaced with the release value provided by the [Faro Web App SDK][faro-sdk]. - -### output block - -The `output` block specifies where to forward collected logs and traces. - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`logs` | `list(LogsReceiver)` | A list of `loki` components to forward logs to. | `[]` | no -`traces` | `list(otelcol.Consumer)` | A list of `otelcol` components to forward traces to. | `[]` | no - -## Exported fields - -`faro.receiver` does not export any fields. - -## Component health - -`faro.receiver` is reported as unhealthy when the integrated server fails to -start. - -## Debug information - -`faro.receiver` does not expose any component-specific debug information. - -## Debug metrics - -`faro.receiver` exposes the following metrics for monitoring the component: - -* `faro_receiver_logs_total` (counter): Total number of ingested logs. -* `faro_receiver_measurements_total` (counter): Total number of ingested measurements. -* `faro_receiver_exceptions_total` (counter): Total number of ingested exceptions. -* `faro_receiver_events_total` (counter): Total number of ingested events. -* `faro_receiver_exporter_errors_total` (counter): Total number of errors produced by an internal exporter. -* `faro_receiver_request_duration_seconds` (histogram): Time (in seconds) spent serving HTTP requests. -* `faro_receiver_request_message_bytes` (histogram): Size (in bytes) of HTTP requests received from clients. -* `faro_receiver_response_message_bytes` (histogram): Size (in bytes) of HTTP responses sent to clients. -* `faro_receiver_inflight_requests` (gauge): Current number of inflight requests. -* `faro_receiver_sourcemap_cache_size` (counter): Number of items in sourcemap cache per origin. -* `faro_receiver_sourcemap_downloads_total` (counter): Total number of sourcemap downloads performed per origin and status. -* `faro_receiver_sourcemap_file_reads_total` (counter): Total number of sourcemap retrievals using the filesystem per origin and status. - -## Example - -```river -faro.receiver "default" { - server { - listen_address = "NETWORK_ADDRESS" - } - - sourcemaps { - location { - path = "PATH_TO_SOURCEMAPS" - minified_path_prefix = "WEB_APP_PREFIX" - } - } - - output { - logs = [loki.write.default.receiver] - traces = [otelcol.exporter.otlp.traces.input] - } -} - -loki.write "default" { - endpoint { - url = "https://LOKI_ADDRESS/api/v1/push" - } -} - -otelcol.exporter.otlp "traces" { - client { - endpoint = "OTLP_ADDRESS" - } -} -``` - -Replace the following: - -* `NETWORK_ADDRESS`: IP address of the network interface to listen to traffic - on. 
-  This IP address must be reachable by browsers using the web application you
-  want to instrument.
-
-* `PATH_TO_SOURCEMAPS`: Path on disk where sourcemaps are located.
-
-* `WEB_APP_PREFIX`: Prefix of the web application being instrumented.
-
-* `LOKI_ADDRESS`: Address of the Loki server to send logs to.
-
-  * If authentication is required to send logs to the Loki server, refer to the
-    documentation of [loki.write][] for more information.
-
-* `OTLP_ADDRESS`: The address of the OTLP-compatible server to send traces to.
-
-  * If authentication is required to send traces to the OTLP server, refer to
-    the documentation of [otelcol.exporter.otlp][] for more information.
-
-[loki.write]: {{< relref "./loki.write.md" >}}
-[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}}
-
-
-
-## Compatible components
-
-`faro.receiver` can accept arguments from the following components:
-
-- Components that export [Loki `LogsReceiver`](../../compatibility/#loki-logsreceiver-exporters)
-- Components that export [OpenTelemetry `otelcol.Consumer`](../../compatibility/#opentelemetry-otelcolconsumer-exporters)
-
-
-{{< admonition type="note" >}}
-Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
-Refer to the linked documentation for more details.
-{{< /admonition >}}
-
-
diff --git a/docs/sources/flow/reference/components/local.file.md b/docs/sources/flow/reference/components/local.file.md
deleted file mode 100644
index 5e935a0bbb..0000000000
--- a/docs/sources/flow/reference/components/local.file.md
+++ /dev/null
@@ -1,84 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/local.file/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/local.file/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/local.file/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/local.file/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/local.file/
-description: Learn about local.file
-title: local.file
----
-
-# local.file
-
-`local.file` exposes the contents of a file on disk to other components. The
-file is watched for changes so that its latest content is always exposed.
-
-The most common use of `local.file` is to load secrets (e.g., API keys) from
-files.
-
-Multiple `local.file` components can be specified by giving them different
-labels.
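-
-For example, a minimal sketch (the file paths are hypothetical) with two
-independently labeled `local.file` components, each watching a different file:
-
-```river
-// Other components can reference the exports as
-// local.file.api_key.content and local.file.username.content.
-local.file "api_key" {
-  filename  = "/var/secrets/api-key.txt"
-  is_secret = true
-}
-
-local.file "username" {
-  filename = "/var/secrets/username.txt"
-}
-```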
-
-## Usage
-
-```river
-local.file "LABEL" {
-  filename = FILE_NAME
-}
-```
-
-## Arguments
-
-The following arguments are supported:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`filename` | `string` | Path of the file on disk to watch. | | yes
-`detector` | `string` | Which file change detector to use (fsnotify, poll). | `"fsnotify"` | no
-`poll_frequency` | `duration` | How often to poll for file changes. | `"1m"` | no
-`is_secret` | `bool` | Marks the file as containing a [secret][]. | `false` | no
-
-[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}}
-
-{{< docs/shared lookup="flow/reference/components/local-file-arguments-text.md" source="agent" version="" >}}
-
-## Exported fields
-
-The following fields are exported and can be referenced by other components:
-
-Name | Type | Description
----- | ---- | -----------
-`content` | `string` or `secret` | The contents of the file from the most recent read.
-
-The `content` field has the `secret` type only if the `is_secret` argument is
-`true`.
-
-## Component health
-
-`local.file` is reported as healthy whenever the watched file was read
-successfully.
-
-Failing to read the file whenever an update is detected (or after the poll
-period elapses) causes the component to be reported as unhealthy. When
-unhealthy, exported fields are kept at the last healthy value. The read error
-is exposed as a log message and in the debug information for the component.
-
-## Debug information
-
-`local.file` does not expose any component-specific debug information.
-
-## Debug metrics
-
-* `agent_local_file_timestamp_last_accessed_unix_seconds` (gauge): The
-  timestamp, in Unix seconds, that the file was last successfully accessed.
-
-## Example
-
-```river
-local.file "secret_key" {
-  filename  = "/var/secrets/password.txt"
-  is_secret = true
-}
-```
diff --git a/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md b/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md
deleted file mode 100644
index 8a5c8fdfaa..0000000000
--- a/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md
+++ /dev/null
@@ -1,151 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.azure_event_hubs/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.azure_event_hubs/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.azure_event_hubs/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.azure_event_hubs/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.azure_event_hubs/
-description: Learn about loki.source.azure_event_hubs
-title: loki.source.azure_event_hubs
----
-
-# loki.source.azure_event_hubs
-
-`loki.source.azure_event_hubs` receives Azure Event Hubs messages by making use
-of an Apache Kafka endpoint on Event Hubs. For more information, see the
-[Azure Event Hubs documentation](https://learn.microsoft.com/en-us/azure/event-hubs/azure-event-hubs-kafka-overview).
-
-To learn more about streaming Azure logs to an Azure Event Hub, refer to
-Microsoft's tutorial on how to [Stream Azure Active Directory logs to an Azure event hub](https://learn.microsoft.com/en-us/azure/active-directory/reports-monitoring/tutorial-azure-monitor-stream-logs-to-event-hub).
-
-Note that an Apache Kafka endpoint is not available within the Basic pricing
-plan. For more information, see the
-[Event Hubs pricing page](https://azure.microsoft.com/en-us/pricing/details/event-hubs/).
-
-Multiple `loki.source.azure_event_hubs` components can be specified by giving
-them different labels.
-
-## Usage
-
-```river
-loki.source.azure_event_hubs "LABEL" {
-  fully_qualified_namespace = "HOST:PORT"
-  event_hubs                = EVENT_HUB_LIST
-  forward_to                = RECEIVER_LIST
-
-  authentication {
-    mechanism = "AUTHENTICATION_MECHANISM"
-  }
-}
-```
-
-## Arguments
-
-`loki.source.azure_event_hubs` supports the following arguments:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`fully_qualified_namespace` | `string` | Event hub namespace. | | yes
-`event_hubs` | `list(string)` | Event Hubs to consume. | | yes
-`group_id` | `string` | The Kafka consumer group ID. | `"loki.source.azure_event_hubs"` | no
-`assignor` | `string` | The consumer group rebalancing strategy to use. | `"range"` | no
-`use_incoming_timestamp` | `bool` | Whether or not to use the timestamp received from Azure Event Hub. | `false` | no
-`labels` | `map(string)` | The labels to associate with each received event. | `{}` | no
-`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
-`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no
-`disallow_custom_messages` | `bool` | Whether to ignore messages that don't match the [schema](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema) for Azure resource logs. | `false` | no
-
-The `fully_qualified_namespace` argument must refer to a full `HOST:PORT` that points to your event hub, such as `NAMESPACE.servicebus.windows.net:9093`.
-The `assignor` argument must be set to one of `"range"`, `"roundrobin"`, or `"sticky"`.
-
-The `relabel_rules` field can make use of the `rules` export value from a
-`loki.relabel` component to apply one or more relabeling rules to log entries
-before they're forwarded to the list of receivers in `forward_to`.
-
-### Labels
-
-The `labels` map is applied to every message that the component reads.
-
-The following internal labels prefixed with `__` are available but are discarded if not relabeled:
-
-- `__meta_kafka_message_key`
-- `__meta_kafka_topic`
-- `__meta_kafka_partition`
-- `__meta_kafka_member_id`
-- `__meta_kafka_group_id`
-- `__azure_event_hubs_category`
-
-## Blocks
-
-The following blocks are supported inside the definition of `loki.source.azure_event_hubs`:
-
-Hierarchy | Name | Description | Required
---------- | ---- | ----------- | --------
-authentication | [authentication] | Authentication configuration with Azure Event Hub. | yes
-
-[authentication]: #authentication-block
-
-### authentication block
-
-The `authentication` block defines the authentication method when communicating with Azure Event Hub.
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`mechanism` | `string` | Authentication mechanism. | | yes
-`connection_string` | `string` | Event Hubs ConnectionString for authentication on Azure Cloud. | | no
-`scopes` | `list(string)` | Access token scopes. Default is `fully_qualified_namespace` without port. | | no
-
-`mechanism` supports the values `"connection_string"` and `"oauth"`. If
-`"connection_string"` is used, you must set the `connection_string` attribute.
-If `"oauth"` is used, you must configure one of the supported [credential
-types](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/README.md#credential-types)
-via environment variables or the Azure CLI.
-
-## Exported fields
-
-`loki.source.azure_event_hubs` does not export any fields.
-
-## Component health
-
-`loki.source.azure_event_hubs` is only reported as unhealthy if given an invalid
-configuration.
-
-## Debug information
-
-`loki.source.azure_event_hubs` does not expose additional debug information.
-
-## Example
-
-This example consumes messages from Azure Event Hub and uses OAuth to authenticate itself.
-
-```river
-loki.source.azure_event_hubs "example" {
-  fully_qualified_namespace = "my-ns.servicebus.windows.net:9093"
-  event_hubs                = ["gw-logs"]
-  forward_to                = [loki.write.example.receiver]
-
-  authentication {
-    mechanism = "oauth"
-  }
-}
-
-loki.write "example" {
-  endpoint {
-    url = "http://loki:3100/loki/api/v1/push"
-  }
-}
-```
-
-## Compatible components
-
-`loki.source.azure_event_hubs` can accept arguments from the following components:
-
-- Components that export [Loki `LogsReceiver`](../../compatibility/#loki-logsreceiver-exporters)
-
-
-{{< admonition type="note" >}}
-Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
-Refer to the linked documentation for more details.
-{{< /admonition >}}
-
-
diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md
deleted file mode 100644
index eccaf51f9f..0000000000
--- a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md
+++ /dev/null
@@ -1,171 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.otlphttp/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.otlphttp/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.otlphttp/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlphttp/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.otlphttp/
-description: Learn about otelcol.exporter.otlphttp
-title: otelcol.exporter.otlphttp
----
-
-# otelcol.exporter.otlphttp
-
-`otelcol.exporter.otlphttp` accepts telemetry data from other `otelcol`
-components and writes it over the network using the OTLP HTTP protocol.
-
-> **NOTE**: `otelcol.exporter.otlphttp` is a wrapper over the upstream
-> OpenTelemetry Collector `otlphttp` exporter. Bug reports or feature requests
-> will be redirected to the upstream repository, if necessary.
-
-Multiple `otelcol.exporter.otlphttp` components can be specified by giving them
-different labels.
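-
-As a minimal sketch (both endpoints are hypothetical), two differently labeled
-exporters can send telemetry to separate backends:
-
-```river
-// Each label creates an independent exporter instance.
-otelcol.exporter.otlphttp "primary" {
-  client {
-    endpoint = "https://otlp-primary.example.com"
-  }
-}
-
-otelcol.exporter.otlphttp "secondary" {
-  client {
-    endpoint = "https://otlp-secondary.example.com"
-  }
-}
-```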
-
-## Usage
-
-```river
-otelcol.exporter.otlphttp "LABEL" {
-  client {
-    endpoint = "HOST:PORT"
-  }
-}
-```
-
-## Arguments
-
-`otelcol.exporter.otlphttp` supports the following arguments:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`metrics_endpoint` | `string` | The endpoint to send metrics to. | `client.endpoint + "/v1/metrics"` | no
-`logs_endpoint` | `string` | The endpoint to send logs to. | `client.endpoint + "/v1/logs"` | no
-`traces_endpoint` | `string` | The endpoint to send traces to. | `client.endpoint + "/v1/traces"` | no
-
-The default value depends on the `endpoint` field set in the required `client`
-block. If set, these arguments override the `client.endpoint` field for the
-corresponding signal.
-
-## Blocks
-
-The following blocks are supported inside the definition of
-`otelcol.exporter.otlphttp`:
-
-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-client | [client][] | Configures the HTTP client used to send telemetry data. | yes
-client > tls | [tls][] | Configures TLS for the HTTP client. | no
-sending_queue | [sending_queue][] | Configures batching of data before sending. | no
-retry_on_failure | [retry_on_failure][] | Configures retry mechanism for failed requests. | no
-debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no
-
-The `>` symbol indicates deeper levels of nesting. For example, `client > tls`
-refers to a `tls` block defined inside a `client` block.
-
-[client]: #client-block
-[tls]: #tls-block
-[sending_queue]: #sending_queue-block
-[retry_on_failure]: #retry_on_failure-block
-[debug_metrics]: #debug_metrics-block
-
-### client block
-
-The `client` block configures the HTTP client used by the component.
-
-The following arguments are supported:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`endpoint` | `string` | The target URL to send telemetry data to. | | yes
-`read_buffer_size` | `string` | Size of the read buffer the HTTP client uses for reading server responses. | `0` | no
-`write_buffer_size` | `string` | Size of the write buffer the HTTP client uses for writing requests. | `"512KiB"` | no
-`timeout` | `duration` | Time to wait before marking a request as failed. | `"30s"` | no
-`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no
-`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no
-`max_idle_conns` | `int` | Limits the number of idle HTTP connections the client can keep open. | `100` | no
-`max_idle_conns_per_host` | `int` | Limits the number of idle HTTP connections the host can keep open. | `0` | no
-`max_conns_per_host` | `int` | Limits the total (dialing, active, and idle) number of connections per host. | `0` | no
-`idle_conn_timeout` | `duration` | Time to wait before an idle connection closes itself. | `"90s"` | no
-`disable_keep_alives` | `bool` | Disable HTTP keep-alive. | `false` | no
-`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no
-
-Setting `disable_keep_alives` to `true` results in significant overhead, as a
-new HTTP(S) connection must be established for every request. Before enabling
-this option, consider whether changes to idle connection settings can achieve
-your goal.
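-
-For example, a hedged sketch (the endpoint and values are illustrative, not
-recommendations) that tunes idle connections instead of disabling keep-alives:
-
-```river
-otelcol.exporter.otlphttp "example" {
-  client {
-    endpoint = "https://otlp.example.com"
-
-    // Keep keep-alives enabled, but hold fewer idle connections
-    // and recycle them sooner.
-    max_idle_conns    = 10
-    idle_conn_timeout = "30s"
-  }
-}
-```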
-
-{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}}
-
-### tls block
-
-The `tls` block configures TLS settings used for the connection to the HTTP
-server.
-
-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}}
-
-### sending_queue block
-
-The `sending_queue` block configures an in-memory buffer of batches before data
-is sent to the HTTP server.
-
-{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}}
-
-### retry_on_failure block
-
-The `retry_on_failure` block configures how failed requests to the HTTP server
-are retried.
-
-{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}}
-
-### debug_metrics block
-
-{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}}
-
-## Exported fields
-
-The following fields are exported and can be referenced by other components:
-
-Name | Type | Description
----- | ---- | -----------
-`input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.
-
-`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics,
-logs, or traces).
-
-## Component health
-
-`otelcol.exporter.otlphttp` is only reported as unhealthy if given an invalid
-configuration.
-
-## Debug information
-
-`otelcol.exporter.otlphttp` does not expose any component-specific debug
-information.
-
-## Example
-
-This example creates an exporter to send data to a locally running Grafana
-Tempo without TLS:
-
-```river
-otelcol.exporter.otlphttp "tempo" {
-  client {
-    endpoint = "http://tempo:4318"
-    tls {
-      insecure             = true
-      insecure_skip_verify = true
-    }
-  }
-}
-```
-
-
-## Compatible components
-
-`otelcol.exporter.otlphttp` has exports that can be consumed by the following components:
-
-- Components that consume [OpenTelemetry `otelcol.Consumer`](../../compatibility/#opentelemetry-otelcolconsumer-consumers)
-
-{{< admonition type="note" >}}
-Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
-Refer to the linked documentation for more details.
-{{< /admonition >}} - - \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md b/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md deleted file mode 100644 index 893d38b591..0000000000 --- a/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md +++ /dev/null @@ -1,309 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.extension.jaeger_remote_sampling/ -description: Learn about otelcol.extension.jaeger_remote_sampling -label: - stage: experimental -title: otelcol.extension.jaeger_remote_sampling ---- - -# otelcol.extension.jaeger_remote_sampling - -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} - -`otelcol.extension.jaeger_remote_sampling` serves a specified Jaeger remote sampling -document. - -> **NOTE**: `otelcol.extension.jaeger_remote_sampling` is a wrapper over the upstream OpenTelemetry -> Collector `jaegerremotesampling` extension. Bug reports or feature requests will be -> redirected to the upstream repository, if necessary. - -Multiple `otelcol.extension.jaeger_remote_sampling` components can be specified by giving them -different labels. - -## Usage - -```river -otelcol.extension.jaeger_remote_sampling "LABEL" { - source { - } -} -``` - -## Arguments - -`otelcol.extension.jaeger_remote_sampling` doesn't support any arguments and is configured fully -through inner blocks. - -## Blocks - -The following blocks are supported inside the definition of -`otelcol.extension.jaeger_remote_sampling`: - -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -http | [http][] | Configures the http server to serve Jaeger remote sampling. | no -http > tls | [tls][] | Configures TLS for the HTTP server. | no -http > cors | [cors][] | Configures CORS for the HTTP server. | no -grpc | [grpc][] | Configures the grpc server to serve Jaeger remote sampling. | no -grpc > tls | [tls][] | Configures TLS for the gRPC server. | no -grpc > keepalive | [keepalive][] | Configures keepalive settings for the configured server. | no -grpc > keepalive > server_parameters | [server_parameters][] | Server parameters used to configure keepalive settings. | no -grpc > keepalive > enforcement_policy | [enforcement_policy][] | Enforcement policy for keepalive settings. | no -source | [source][] | Configures the Jaeger remote sampling document. | yes -source > remote | [remote][] | Configures the gRPC client used to retrieve the Jaeger remote sampling document. | no -source > remote > tls | [tls][] | Configures TLS for the gRPC client. | no -source > remote > keepalive | [keepalive][] | Configures keepalive settings for the gRPC client. | no - -The `>` symbol indicates deeper levels of nesting. For example, `grpc > tls` -refers to a `tls` block defined inside a `grpc` block. 
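-
-For illustration, a minimal sketch (the file paths are hypothetical) showing
-this nesting, with a `tls` block inside a `grpc` block next to the required
-`source` block:
-
-```river
-otelcol.extension.jaeger_remote_sampling "example" {
-  grpc {
-    tls {
-      cert_file = "/path/to/cert.pem"
-      key_file  = "/path/to/key.pem"
-    }
-  }
-
-  source {
-    file = "/path/to/jaeger-sampling.json"
-  }
-}
-```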
- -[http]: #http-block -[tls]: #tls-block -[cors]: #cors-block -[grpc]: #grpc-block -[keepalive]: #keepalive-block -[server_parameters]: #server_parameters-block -[enforcement_policy]: #enforcement_policy-block -[source]: #source-block -[remote]: #remote-block -[tls_client]: #tls-client-block -[keepalive_client]: #keepalive-client-block - -### http block - -The `http` block configures an HTTP server which serves the Jaeger remote -sampling document. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`endpoint` | `string` | `host:port` to listen for traffic on. | `"0.0.0.0:5778"` | no -`max_request_body_size` | `string` | Maximum request body size the server will allow. No limit when unset. | | no -`include_metadata` | `boolean` | Propagate incoming connection metadata to downstream consumers. | | no - -### tls block - -The `tls` block configures TLS settings used for a server. If the `tls` block -isn't provided, TLS won't be used for connections to the server. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`ca_file` | `string` | Path to the CA file. | | no -`cert_file` | `string` | Path to the TLS certificate. | | no -`key_file` | `string` | Path to the TLS certificate key. | | no -`min_version` | `string` | Minimum acceptable TLS version for connections. | `"TLS 1.2"` | no -`max_version` | `string` | Maximum acceptable TLS version for connections. | `"TLS 1.3"` | no -`reload_interval` | `duration` | Frequency to reload the certificates. | | no -`client_ca_file` | `string` | Path to the CA file used to authenticate client certificates. | | no - -### cors block - -The `cors` block configures CORS settings for an HTTP server. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`allowed_origins` | `list(string)` | Allowed values for the `Origin` header. | | no -`allowed_headers` | `list(string)` | Accepted headers from CORS requests. | `["X-Requested-With"]` | no -`max_age` | `number` | Configures the `Access-Control-Max-Age` response header. | | no - -The `allowed_headers` specifies which headers are acceptable from a CORS -request. The following headers are always implicitly allowed: - -* `Accept` -* `Accept-Language` -* `Content-Type` -* `Content-Language` - -If `allowed_headers` includes `"*"`, all headers will be permitted. - -### grpc block - -The `grpc` block configures a gRPC server which serves the Jaeger remote - sampling document. - -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`endpoint` | `string` | `host:port` to listen for traffic on. | `"0.0.0.0:14250"` | no -`transport` | `string` | Transport to use for the gRPC server. | `"tcp"` | no -`max_recv_msg_size` | `string` | Maximum size of messages the server will accept. 0 disables a limit. | | no -`max_concurrent_streams` | `number` | Limit the number of concurrent streaming RPC calls. | | no -`read_buffer_size` | `string` | Size of the read buffer the gRPC server will use for reading from clients. | `"512KiB"` | no -`write_buffer_size` | `string` | Size of the write buffer the gRPC server will use for writing to clients. | | no -`include_metadata` | `boolean` | Propagate incoming connection metadata to downstream consumers. 
| | no
-
-### keepalive block
-
-The `keepalive` block configures keepalive settings for connections to a gRPC
-server.
-
-`keepalive` doesn't support any arguments and is configured fully through inner
-blocks.
-
-### server_parameters block
-
-The `server_parameters` block controls keepalive and maximum age settings for gRPC
-servers.
-
-The following arguments are supported:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`max_connection_idle` | `duration` | Maximum age for idle connections. | `"infinity"` | no
-`max_connection_age` | `duration` | Maximum age for non-idle connections. | `"infinity"` | no
-`max_connection_age_grace` | `duration` | Time to wait before forcibly closing connections. | `"infinity"` | no
-`time` | `duration` | How often to ping inactive clients to check for liveness. | `"2h"` | no
-`timeout` | `duration` | Time to wait before closing inactive clients that do not respond to liveness checks. | `"20s"` | no
-
-### enforcement_policy block
-
-The `enforcement_policy` block configures the keepalive enforcement policy for
-gRPC servers. The server closes connections from clients that violate the
-configured policy.
-
-The following arguments are supported:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`min_time` | `duration` | Minimum time clients should wait before sending a keepalive ping. | `"5m"` | no
-`permit_without_stream` | `boolean` | Allow clients to send keepalive pings when there are no active streams. | `false` | no
-
-### source block
-
-The `source` block configures the method of retrieving the Jaeger remote sampling document
-that is served by the servers specified in the `grpc` and `http` blocks.
-
-The following arguments are supported:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`file` | `string` | A local file containing a Jaeger remote sampling document. | `""` | no
-`reload_interval` | `duration` | The interval at which to reload the specified file. Leave at 0 to never reload. | `0` | no
-`content` | `string` | A string containing the Jaeger remote sampling contents directly. | `""` | no
-
-Exactly one of the `file` argument, the `content` argument, or the `remote` block must be specified.
-
-### remote block
-
-The `remote` block configures the gRPC client used by the component.
-
-The following arguments are supported:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`endpoint` | `string` | `host:port` to send telemetry data to. | | yes
-`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no
-`read_buffer_size` | `string` | Size of the read buffer the gRPC client uses for reading server responses. | | no
-`write_buffer_size` | `string` | Size of the write buffer the gRPC client uses for writing requests. | `"512KiB"` | no
-`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. | `false` | no
-`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no
-`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no
-`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no
-`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests.
| | no
-
-{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}}
-
-{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}}
-
-{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}}
-
-An HTTP proxy can be configured through the following environment variables:
-
-* `HTTPS_PROXY`
-* `NO_PROXY`
-
-The `HTTPS_PROXY` environment variable specifies a URL to use for proxying
-requests. Connections to the proxy are established via [the `HTTP CONNECT`
-method][HTTP CONNECT].
-
-The `NO_PROXY` environment variable is an optional list of comma-separated
-hostnames for which the HTTPS proxy should _not_ be used. Each hostname can be
-provided as an IP address (`1.2.3.4`), an IP address in CIDR notation
-(`1.2.3.4/8`), a domain name (`example.com`), or `*`. A domain name matches
-that domain and all subdomains. A domain name with a leading "."
-(`.example.com`) matches subdomains only. `NO_PROXY` is only read when
-`HTTPS_PROXY` is set.
-
-Because `otelcol.extension.jaeger_remote_sampling` uses gRPC, the configured
-proxy server must be able to handle and proxy HTTP/2 traffic.
-
-[HTTP CONNECT]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/CONNECT
-
-### tls client block
-
-The `tls` block configures TLS settings used for the connection to the gRPC
-server.
-
-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}}
-
-### keepalive client block
-
-The `keepalive` block configures keepalive settings for gRPC client
-connections.
-
-The following arguments are supported:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`ping_wait` | `duration` | How often to ping the server after no activity. | | no
-`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no
-`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. | | no
-
-## Component health
-
-`otelcol.extension.jaeger_remote_sampling` is only reported as unhealthy if
-given an invalid configuration.
-
-## Debug information
-
-`otelcol.extension.jaeger_remote_sampling` does not expose any
-component-specific debug information.
-
-## Examples
-
-### Serving from a file
-
-This example configures the Jaeger remote sampling extension to load a local
-JSON document and serve it over the default HTTP port of 5778. Currently, this
-configuration style exists for consistency with upstream OpenTelemetry
-Collector components and may be removed.
- -```river -otelcol.extension.jaeger_remote_sampling "example" { - http { - } - source { - file = "/path/to/jaeger-sampling.json" - reload_interval = "10s" - } -} -``` - -### Serving from another component - - -This example uses the output of a component to determine what sampling -rules to serve: - -```river -local.file "sampling" { - filename = "/path/to/jaeger-sampling.json" -} - -otelcol.extension.jaeger_remote_sampling "example" { - http { - } - source { - content = local.file.sampling.content - } -} -``` diff --git a/docs/sources/flow/reference/config-blocks/_index.md b/docs/sources/flow/reference/config-blocks/_index.md deleted file mode 100644 index bf528e3a16..0000000000 --- a/docs/sources/flow/reference/config-blocks/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/ -description: Learn about configuration blocks -title: Configuration blocks -weight: 200 ---- - -# Configuration blocks - -Configuration blocks are optional top-level blocks that can be used to -configure various parts of the {{< param "PRODUCT_NAME" >}} process. Each configuration block can -only be defined once. - -Configuration blocks are _not_ components, so they have no exports. - -{{< section >}} diff --git a/docs/sources/flow/reference/stdlib/_index.md b/docs/sources/flow/reference/stdlib/_index.md deleted file mode 100644 index 8f42f4bc28..0000000000 --- a/docs/sources/flow/reference/stdlib/_index.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/reference/stdlib/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/ -- standard-library/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/ -description: The standard library is a list of functions used in expressions when - assigning values to attributes -title: Standard library -weight: 400 ---- - -# Standard library - -The standard library is a list of functions which can be used in expressions -when assigning values to attributes. - -All standard library functions are [pure functions](https://en.wikipedia.org/wiki/Pure_function): they will always return the same -output if given the same input. 
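-
-For example, repeated evaluations of the same expression always produce the
-same value:
-
-```
-> concat([1, 2], [3, 4])
-[1, 2, 3, 4]
-
-> concat([1, 2], [3, 4])
-[1, 2, 3, 4]
-```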
- -{{< section >}} diff --git a/docs/sources/flow/reference/stdlib/coalesce.md b/docs/sources/flow/reference/stdlib/coalesce.md deleted file mode 100644 index 73f5cd4448..0000000000 --- a/docs/sources/flow/reference/stdlib/coalesce.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/coalesce/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/coalesce/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/coalesce/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/coalesce/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/coalesce/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/coalesce/ -description: Learn about coalesce -title: coalesce ---- - -# coalesce - -`coalesce` takes any number of arguments and returns the first one that isn't null, an empty string, empty list, or an empty object. -It is useful for obtaining a default value, such as if an environment variable isn't defined. -If no argument is non-empty or non-zero, the last argument is returned. - -## Examples - -``` -> coalesce("a", "b") -a -> coalesce("", "b") -b -> coalesce(env("DOES_NOT_EXIST"), "c") -c -``` diff --git a/docs/sources/flow/reference/stdlib/concat.md b/docs/sources/flow/reference/stdlib/concat.md deleted file mode 100644 index 36e7eba906..0000000000 --- a/docs/sources/flow/reference/stdlib/concat.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/concat/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/concat/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/concat/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/concat/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/concat/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/concat/ -description: Learn about concat -title: concat ---- - -# concat - -The `concat` function concatenates one or more lists of values into a single -list. Each argument to `concat` must be a list value. Elements within the list -can be any type. - -## Examples - -``` -> concat([]) -[] - -> concat([1, 2], [3, 4]) -[1, 2, 3, 4] - -> concat([1, 2], [], [bool, null]) -[1, 2, bool, null] - -> concat([[1, 2], [3, 4]], [[5, 6]]) -[[1, 2], [3, 4], [5, 6]] -``` diff --git a/docs/sources/flow/reference/stdlib/constants.md b/docs/sources/flow/reference/stdlib/constants.md deleted file mode 100644 index 3caf5c336a..0000000000 --- a/docs/sources/flow/reference/stdlib/constants.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/constants/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/constants/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/constants/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/constants/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/constants/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/constants/ -description: Learn about constants -title: constants ---- - -# constants - -The `constants` object exposes a list of constant values about the system -{{< param "PRODUCT_NAME" >}} is running on: - -* `constants.hostname`: The hostname of the machine {{< param "PRODUCT_NAME" >}} is running - on. -* `constants.os`: The operating system {{< param "PRODUCT_NAME" >}} is running on. 
-* `constants.arch`: The architecture of the system {{< param "PRODUCT_NAME" >}} is running on.
-
-## Examples
-
-```
-> constants.hostname
-"my-hostname"
-
-> constants.os
-"linux"
-
-> constants.arch
-"amd64"
-```
diff --git a/docs/sources/flow/reference/stdlib/env.md b/docs/sources/flow/reference/stdlib/env.md
deleted file mode 100644
index 49a65d1a6a..0000000000
--- a/docs/sources/flow/reference/stdlib/env.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/env/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/env/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/env/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/env/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/env/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/env/
-description: Learn about env
-title: env
----
-
-# env
-
-The `env` function gets the value of an environment variable from the system {{< param "PRODUCT_NAME" >}} is running on.
-If the environment variable does not exist, `env` returns an empty string.
-
-## Examples
-
-```
-> env("HOME")
-"/home/grafana-agent"
-
-> env("DOES_NOT_EXIST")
-""
-```
diff --git a/docs/sources/flow/reference/stdlib/join.md b/docs/sources/flow/reference/stdlib/join.md
deleted file mode 100644
index 3203585c81..0000000000
--- a/docs/sources/flow/reference/stdlib/join.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/join/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/join/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/join/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/join/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/join/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/join/
-description: Learn about join
-title: join
----
-
-# join
-
-`join` concatenates all items in an array into a string, using a separator
-string between each item.
-
-```river
-join(list, separator)
-```
-
-## Examples
-
-```river
-> join(["foo", "bar", "baz"], "-")
-"foo-bar-baz"
-> join(["foo", "bar", "baz"], ", ")
-"foo, bar, baz"
-> join(["foo"], ", ")
-"foo"
-```
diff --git a/docs/sources/flow/reference/stdlib/json_decode.md b/docs/sources/flow/reference/stdlib/json_decode.md
deleted file mode 100644
index d56fc45dab..0000000000
--- a/docs/sources/flow/reference/stdlib/json_decode.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/json_decode/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/json_decode/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/json_decode/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/json_decode/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/json_decode/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/json_decode/
-description: Learn about json_decode
-title: json_decode
----
-
-# json_decode
-
-The `json_decode` function decodes a string representing JSON into a River
-value. `json_decode` fails if the string argument provided cannot be parsed as
-JSON.
-
-A common use case of `json_decode` is to decode the output of a
-[`local.file`][] component to a River value.
-
-> Remember to escape double quotes when passing JSON string literals to `json_decode`.
->
-> For example, the JSON value `{"key": "value"}` is properly represented by the
-> string `"{\"key\": \"value\"}"`.
-
-## Examples
-
-```
-> json_decode("15")
-15
-
-> json_decode("[1, 2, 3]")
-[1, 2, 3]
-
-> json_decode("null")
-null
-
-> json_decode("{\"key\": \"value\"}")
-{
-  key = "value",
-}
-
-> json_decode(local.file.some_file.content)
-"Hello, world!"
-```
-
-[`local.file`]: {{< relref "../components/local.file.md" >}}
diff --git a/docs/sources/flow/reference/stdlib/json_path.md b/docs/sources/flow/reference/stdlib/json_path.md
deleted file mode 100644
index 91058e6e31..0000000000
--- a/docs/sources/flow/reference/stdlib/json_path.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/json_path/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/json_path/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/json_path/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/json_path/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/json_path/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/json_path/
-description: Learn about json_path
-title: json_path
----
-
-# json_path
-
-The `json_path` function looks up values using [jsonpath](https://goessner.net/articles/JsonPath/) syntax.
-
-The function expects two strings. The first string is the JSON string used to look up values. The second string is the JSONPath expression.
-
-`json_path` always returns a list of values. If the JSONPath expression does not match any values, an empty list is returned.
-
-A common use case of `json_path` is to decode and filter the output of a [`local.file`][] or [`remote.http`][] component to a River value.
-
-> Remember to escape double quotes when passing JSON string literals to `json_path`.
->
-> For example, the JSON value `{"key": "value"}` is properly represented by the
-> string `"{\"key\": \"value\"}"`.
-
-## Examples
-
-```
-> json_path("{\"key\": \"value\"}", ".key")
-["value"]
-
-> json_path("[{\"name\": \"Department\",\"value\": \"IT\"},{\"name\":\"TestStatus\",\"value\":\"Pending\"}]", "[?(@.name == \"Department\")].value")
-["IT"]
-
-> json_path("{\"key\": \"value\"}", ".nonexists")
-[]
-
-> json_path("{\"key\": \"value\"}", ".key")[0]
-value
-```
-
-[`local.file`]: {{< relref "../components/local.file.md" >}}
-[`remote.http`]: {{< relref "../components/remote.http.md" >}}
diff --git a/docs/sources/flow/reference/stdlib/nonsensitive.md b/docs/sources/flow/reference/stdlib/nonsensitive.md
deleted file mode 100644
index a2bb0bd31d..0000000000
--- a/docs/sources/flow/reference/stdlib/nonsensitive.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/nonsensitive/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/nonsensitive/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/nonsensitive/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/nonsensitive/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/nonsensitive/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/nonsensitive/
-description: Learn about nonsensitive
-title: nonsensitive
----
-
-# nonsensitive
-
-`nonsensitive` converts a [secret][] value back into a string.
-
-> **WARNING**: Only use `nonsensitive` when you are positive that the value
-> being converted back to a string is not a sensitive value.
->
-> Strings resulting from calls to `nonsensitive` will be displayed in plaintext
-> in the UI and internal API calls.
-
-[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}}
-
-## Examples
-
-```
-// Assuming `sensitive_value` is a secret:
-
-> sensitive_value
-(secret)
-> nonsensitive(sensitive_value)
-"Hello, world!"
-```
diff --git a/docs/sources/flow/reference/stdlib/replace.md b/docs/sources/flow/reference/stdlib/replace.md
deleted file mode 100644
index 2c1eb383f3..0000000000
--- a/docs/sources/flow/reference/stdlib/replace.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/replace/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/replace/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/replace/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/replace/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/replace/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/replace/
-description: Learn about replace
-title: replace
----
-
-# replace
-
-`replace` searches a string for a substring, and replaces each occurrence of
-the substring with a replacement string.
-
-```river
-replace(string, substring, replacement)
-```
-
-## Examples
-
-```river
-> replace("1 + 2 + 3", "+", "-")
-"1 - 2 - 3"
-```
diff --git a/docs/sources/flow/reference/stdlib/split.md b/docs/sources/flow/reference/stdlib/split.md
deleted file mode 100644
index 3087ca1536..0000000000
--- a/docs/sources/flow/reference/stdlib/split.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/split/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/split/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/split/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/split/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/split/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/split/
-description: Learn about split
-title: split
----
-
-# split
-
-`split` produces a list by dividing a string at all occurrences of a separator.
-
-```river
-split(string, separator)
-```
-
-## Examples
-
-```river
-> split("foo,bar,baz", ",")
-["foo", "bar", "baz"]
-
-> split("foo", ",")
-["foo"]
-
-> split("", ",")
-[""]
-```
diff --git a/docs/sources/flow/reference/stdlib/to_lower.md b/docs/sources/flow/reference/stdlib/to_lower.md
deleted file mode 100644
index 8c252fb354..0000000000
--- a/docs/sources/flow/reference/stdlib/to_lower.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-aliases:
-- ../../configuration-language/standard-library/to_lower/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/to_lower/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/to_lower/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/to_lower/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/to_lower/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/to_lower/
-description: Learn about to_lower
-title: to_lower
----
-
-# to_lower
-
-`to_lower` converts all uppercase letters in a string to lowercase.
- -## Examples - -```river -> to_lower("HELLO") -"hello" -``` diff --git a/docs/sources/flow/reference/stdlib/to_upper.md b/docs/sources/flow/reference/stdlib/to_upper.md deleted file mode 100644 index aef26d5ff6..0000000000 --- a/docs/sources/flow/reference/stdlib/to_upper.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/to_upper/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/to_upper/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/to_upper/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/to_upper/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/to_upper/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/to_upper/ -description: Learn about to_upper -title: to_upper ---- - -# to_upper - -`to_upper` converts all lowercase letters in a string to uppercase. - -## Examples - -```river -> to_upper("hello") -"HELLO" -``` diff --git a/docs/sources/flow/reference/stdlib/trim.md b/docs/sources/flow/reference/stdlib/trim.md deleted file mode 100644 index 5023d1f213..0000000000 --- a/docs/sources/flow/reference/stdlib/trim.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/trim/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/trim/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim/ -description: Learn about trim -title: trim ---- - -# trim - -`trim` removes the specified set of characters from the start and end of a string. - -```river -trim(string, str_character_set) -``` - -## Examples - -```river -> trim("?!hello?!", "!?") -"hello" - -> trim("foobar", "far") -"oob" - -> trim(" hello! world.! ", "! ") -"hello! world." -``` diff --git a/docs/sources/flow/reference/stdlib/trim_prefix.md b/docs/sources/flow/reference/stdlib/trim_prefix.md deleted file mode 100644 index 33d716f133..0000000000 --- a/docs/sources/flow/reference/stdlib/trim_prefix.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/trim_prefix/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/trim_prefix/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim_prefix/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim_prefix/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim_prefix/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim_prefix/ -description: Learn about trim_prefix -title: trim_prefix ---- - -# trim_prefix - -`trim_prefix` removes the prefix from the start of a string. If the string does not start with the prefix, the string is returned unchanged. 
- -## Examples - -```river -> trim_prefix("helloworld", "hello") -"world" -``` diff --git a/docs/sources/flow/reference/stdlib/trim_space.md b/docs/sources/flow/reference/stdlib/trim_space.md deleted file mode 100644 index 5e13e0ba0d..0000000000 --- a/docs/sources/flow/reference/stdlib/trim_space.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/trim_space/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/trim_space/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim_space/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim_space/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim_space/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim_space/ -description: Learn about trim_space -title: trim_space ---- - -# trim_space - -`trim_space` removes any whitespace characters from the start and end of a string. - -## Examples - -```river -> trim_space(" hello\n\n") -"hello" -``` diff --git a/docs/sources/flow/reference/stdlib/trim_suffix.md b/docs/sources/flow/reference/stdlib/trim_suffix.md deleted file mode 100644 index 4741007ebe..0000000000 --- a/docs/sources/flow/reference/stdlib/trim_suffix.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -aliases: -- ../../configuration-language/standard-library/trim_suffix/ -- /docs/grafana-cloud/agent/flow/reference/stdlib/trim_suffix/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/trim_suffix/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/trim_suffix/ -- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/trim_suffix/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/trim_suffix/ -description: Learn about trim_suffix -title: trim_suffix ---- - -# trim_suffix - -`trim_suffix` removes the suffix from the end of a string. - -## Examples - -```river -> trim_suffix("helloworld", "world") -"hello" -``` diff --git a/docs/sources/flow/release-notes.md b/docs/sources/flow/release-notes.md deleted file mode 100644 index 12d1578685..0000000000 --- a/docs/sources/flow/release-notes.md +++ /dev/null @@ -1,634 +0,0 @@ ---- -aliases: -- ./upgrade-guide/ -- /docs/grafana-cloud/agent/flow/release-notes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/release-notes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/release-notes/ -- /docs/grafana-cloud/send-data/agent/flow/release-notes/ -canonical: https://grafana.com/docs/agent/latest/flow/release-notes/ -description: Release notes for Grafana Agent Flow -menuTitle: Release notes -title: Release notes for Grafana Agent Flow -weight: 999 ---- - -# Release notes for {{% param "PRODUCT_NAME" %}} - -The release notes provide information about deprecations and breaking changes in {{< param "PRODUCT_NAME" >}}. - -For a complete list of changes to {{< param "PRODUCT_ROOT_NAME" >}}, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md). - -{{< admonition type="note" >}} -These release notes are specific to {{< param "PRODUCT_NAME" >}}. 
Other release notes for the different {{< param "PRODUCT_ROOT_NAME" >}} variants are contained on separate pages:
-
-* [Static mode release notes][release-notes-static]
-* [Static mode Kubernetes operator release notes][release-notes-operator]
-
-[release-notes-static]: {{< relref "../static/release-notes.md" >}}
-[release-notes-operator]: {{< relref "../operator/release-notes.md" >}}
-{{< /admonition >}}
-
-## v0.40
-
-### Breaking change: Prohibit the configuration of services within modules.
-
-Previously it was possible to configure the HTTP service via the [HTTP config block](https://grafana.com/docs/agent/v0.39/flow/reference/config-blocks/http/) inside a module.
-This functionality is now only available in the main configuration.
-
-### Breaking change: Change the default value of `disable_high_cardinality_metrics` to `true`.
-
-The `disable_high_cardinality_metrics` configuration argument is used by `otelcol.exporter` components such as `otelcol.exporter.otlp`.
-If you need to see high cardinality metrics containing labels such as IP addresses and port numbers, you now have to explicitly set `disable_high_cardinality_metrics` to `false`.
-
-### Breaking change: Rename component `prometheus.exporter.agent` to `prometheus.exporter.self`
-
-The name `prometheus.exporter.agent` is potentially ambiguous and can be misinterpreted as an exporter for Prometheus Agent.
-The new name reflects the component's true purpose as an exporter of the process's own metrics.
-
-## v0.39
-
-### Breaking change: `otelcol.receiver.prometheus` will drop all `otel_scope_info` metrics when converting them to OTLP
-
-* If the `otel_scope_info` metric has the `otel_scope_name` and `otel_scope_version` labels,
-  their values are used to set the OTLP Instrumentation Scope name and version, respectively.
-* Labels for `otel_scope_info` metrics other than `otel_scope_name` and `otel_scope_version`
-  are added as scope attributes with the matching name and version.
-
-### Breaking change: label for `target` block in `prometheus.exporter.blackbox` is removed
-
-Previously, the `target` block in `prometheus.exporter.blackbox` required a label, which was used in the job's name.
-In this version, you must specify the `name` attribute instead, which allows for less restrictive naming.
-
-Old configuration example:
-
-```river
-prometheus.exporter.blackbox "example" {
-  config_file = "blackbox_modules.yml"
-
-  target "grafana" {
-    address = "http://grafana.com"
-    module  = "http_2xx"
-    labels = {
-      "env": "dev",
-    }
-  }
-}
-```
-
-New configuration example:
-
-```river
-prometheus.exporter.blackbox "example" {
-  config_file = "blackbox_modules.yml"
-
-  target {
-    name    = "grafana"
-    address = "http://grafana.com"
-    module  = "http_2xx"
-    labels = {
-      "env": "dev",
-    }
-  }
-}
-```
-
-## v0.38
-
-### Breaking change: `otelcol.exporter.jaeger` component removed
-
-The deprecated `otelcol.exporter.jaeger` component has been removed. To send
-traces to Jaeger, use `otelcol.exporter.otlp` and a version of Jaeger that
-supports OTLP.
-
-## v0.37
-
-### Breaking change: Renamed `non_indexed_labels` Loki processing stage to `structured_metadata`.
-
-If you use the Loki processing stage in your {{< param "PRODUCT_NAME" >}} configuration, you must rename the `non_indexed_labels` pipeline stage definition to `structured_metadata`.
-
-Old configuration example:
-
-```river
-stage.non_indexed_labels {
-  values = {"app" = ""}
-}
-```
-
-New configuration example:
-
-```river
-stage.structured_metadata {
-  values = {"app" = ""}
-}
-```
-
-### Breaking change: `otelcol.exporter.prometheus` scope labels updated
-
-There are two changes to the way scope labels work for this component.
-
-* Previously, the `include_scope_info` argument would trigger including
-`otel_scope_name` and `otel_scope_version` in metrics. This now defaults
-to `true` and is controlled via the `include_scope_labels` argument.
-
-* A bugfix was made to rename `otel_scope_info` metric labels from
-`name` to `otel_scope_name` and `version` to `otel_scope_version`. This
-now matches the OTLP Instrumentation Scope specification.
-
-### Breaking change: `prometheus.exporter.unix` now requires a label.
-
-Previously, the exporter was a singleton and did not require a label. The exporter
-can now be used multiple times and requires a label.
-
-Old configuration example:
-
-```river
-prometheus.exporter.unix { /* ... */ }
-```
-
-New configuration example:
-
-```river
-prometheus.exporter.unix "example" { /* ... */ }
-```
-
-## v0.36
-
-### Breaking change: The default value of `retry_on_http_429` is changed to `true` for the `queue_config` in `prometheus.remote_write`
-
-The default value of `retry_on_http_429` is changed from `false` to `true` for the `queue_config` block in `prometheus.remote_write`
-so that {{< param "PRODUCT_ROOT_NAME" >}} can retry sending and avoid data being lost for metric pipelines by default.
-
-* If you set `retry_on_http_429` explicitly, no action is required.
-* If you do not set `retry_on_http_429` explicitly and you do *not* want to retry on HTTP 429, make sure you set it to `false` as you upgrade to this new version.
-
-### Breaking change: `loki.source.file` no longer automatically extracts logs from compressed files
-
-The `loki.source.file` component will no longer automatically detect and decompress
-logs from compressed files (this was undocumented behavior).
-
-This file-extension-based detection of compressed files has been replaced by a
-new configuration block that explicitly enables and specifies the compression
-format. By default, the decompression of files is entirely disabled.
-
-How to migrate:
-
-* If {{< param "PRODUCT_NAME" >}} never reads logs from files with
-  extensions `.gz`, `.tar.gz`, `.z`, or `.bz2`, then no action is required.
-  > You can check which file extensions {{< param "PRODUCT_NAME" >}} reads from by looking
-  at the `path` label on the `loki_source_file_file_bytes_total` metric.
-
-* If {{< param "PRODUCT_NAME" >}} extracts data from compressed files, add the following
-  configuration block to your `loki.source.file` component:
-
-  ```river
-  loki.source.file "example" {
-    ...
-    decompression {
-      enabled = true
-      format  = "<FORMAT>"
-    }
-  }
-  ```
-
-  where `<FORMAT>` is the appropriate compression format;
-  see the [`loki.source.file` documentation][loki-source-file-docs] for details.
-
-  [loki-source-file-docs]: {{< relref "./reference/components/loki.source.file.md" >}}
-
-## v0.35
-
-### Breaking change: `auth` and `version` attributes from `walk_params` block of `prometheus.exporter.snmp` have been removed
-
-The `prometheus.exporter.snmp` flow component wraps a new version of the SNMP exporter, which introduces a new configuration file format.
-This new format separates the walk and metric mappings from the connection and authentication settings.
This allows for easier configuration of different -auth params without having to duplicate the full walk and metric mapping. - -Old configuration example: - -```river -prometheus.exporter.snmp "example" { - config_file = "snmp_modules.yml" - - target "network_switch_1" { - address = "192.168.1.2" - module = "if_mib" - walk_params = "public" - } - - walk_param "public" { - retries = "2" - version = "2" - auth { - community = "public" - } - } -} -``` - -New configuration example: - -```river -prometheus.exporter.snmp "example" { - config_file = "snmp_modules.yml" - - target "network_switch_1" { - address = "192.168.1.2" - module = "if_mib" - walk_params = "public" - auth = "public_v2" - } - - walk_param "public" { - retries = "2" - } -} -``` - -See [Module and Auth Split Migration](https://github.com/prometheus/snmp_exporter/blob/main/auth-split-migration.md) for more details. - -### Breaking change: `discovery.file` has been renamed to `local.file_match` - -The `discovery.file` component has been renamed to `local.file_match` to make -its purpose more clear: to find files on the local filesystem matching a -pattern. - -Renaming `discovery.file` to `local.file_match` also resolves a point of -confusion where `discovery.file` was thought to implement Prometheus' file -service discovery. - -Old configuration example: - -```river -discovery.kubernetes "k8s" { - role = "pod" -} - -discovery.relabel "k8s" { - targets = discovery.kubernetes.k8s.targets - - rule { - source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_label_name"] - target_label = "job" - separator = "/" - } - - rule { - source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] - target_label = "__path__" - separator = "/" - replacement = "/var/log/pods/*$1/*.log" - } -} - -discovery.file "pods" { - path_targets = discovery.relabel.k8s.output -} -``` - -New configuration example: - -```river -discovery.kubernetes "k8s" { - role = "pod" -} - -discovery.relabel "k8s" { - targets = discovery.kubernetes.k8s.targets - - rule { - source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_label_name"] - target_label = "job" - separator = "/" - } - - rule { - source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] - target_label = "__path__" - separator = "/" - replacement = "/var/log/pods/*$1/*.log" - } -} - -local.file_match "pods" { - path_targets = discovery.relabel.k8s.output -} -``` - -### Breaking change: `discovery_target_decode` has been removed from the River standard library - -The `discovery_target_decode` function was initially added to the River -standard library as an equivalent to Prometheus' file-based discovery and -HTTP-based discovery methods. - -However, the Prometheus discovery mechanisms have more functionality than -`discovery_target_decode`: - -* Prometheus' `file_sd_configs` can use many files based on pattern matching. -* Prometheus' `http_sd_configs` also support YAML files. - -Additionally, it is no longer an accepted pattern to have component-specific -functions to the River standard library. - -As a result, `discovery_target_decode` has been removed in favor of using -components. 
-
-Old configuration example:
-
-```river
-remote.http "example" {
-  url = URL_CONTAINING_TARGETS
-}
-
-prometheus.scrape "example" {
-  targets    = discovery_target_decode(remote.http.example.content)
-  forward_to = FORWARD_LIST
-}
-```
-
-New configuration example:
-
-```river
-discovery.http "example" {
-  url = URL_CONTAINING_TARGETS
-}
-
-prometheus.scrape "example" {
-  targets    = discovery.http.example.targets
-  forward_to = FORWARD_LIST
-}
-```
-
-### Breaking change: The algorithm for the "hash" action of `otelcol.processor.attributes` has changed
-
-The hash produced when using `action = "hash"` in the `otelcol.processor.attributes` flow component now uses the more secure SHA-256 algorithm.
-The change was made in PR [#22831](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/22831) of opentelemetry-collector-contrib.
-
-### Breaking change: `otelcol.exporter.loki` now includes instrumentation scope in its output
-
-Additional `instrumentation_scope` information will be added to the OTLP log signal, like this:
-
-```json
-{
-  "body": "Example log",
-  "traceid": "01020304000000000000000000000000",
-  "spanid": "0506070800000000",
-  "severity": "error",
-  "attributes": {
-    "attr1": "1",
-    "attr2": "2"
-  },
-  "resources": {
-    "host.name": "something"
-  },
-  "instrumentation_scope": {
-    "name": "example-logger-name",
-    "version": "v1"
-  }
-}
-```
-
-### Breaking change: `otelcol.extension.jaeger_remote_sampling` removes the `/` HTTP endpoint
-
-The `/` HTTP endpoint was the same as the `/sampling` endpoint. The `/sampling` endpoint is still functional.
-The change was made in PR [#18070](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/18070) of opentelemetry-collector-contrib.
-
-### Breaking change: The `remote_sampling` block has been removed from `otelcol.receiver.jaeger`
-
-The `remote_sampling` block in `otelcol.receiver.jaeger` has been an undocumented no-op configuration for some time, and has now been removed.
-Use `otelcol.extension.jaeger_remote_sampling` instead.
-
-### Deprecation: `otelcol.exporter.jaeger` has been deprecated and will be removed in {{% param "PRODUCT_NAME" %}} v0.38.0.
-
-This is because Jaeger supports OTLP directly and the OpenTelemetry Collector is also removing its
-[Jaeger exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/jaegerexporter).
-
-## v0.34
-
-### Breaking change: `phlare.scrape` and `phlare.write` have been renamed to `pyroscope.scrape` and `pyroscope.write`
-
-Old configuration example:
-
-```river
-phlare.write "staging" {
-  endpoint {
-    url = "http://phlare:4100"
-  }
-}
-
-phlare.scrape "default" {
-  targets = [
-    {"__address__" = "agent:12345", "app" = "agent"},
-  ]
-  forward_to = [phlare.write.staging.receiver]
-}
-```
-
-New configuration example:
-
-```river
-pyroscope.write "staging" {
-  endpoint {
-    url = "http://pyroscope:4100"
-  }
-}
-
-pyroscope.scrape "default" {
-  targets = [
-    {"__address__" = "agent:12345", "app" = "agent"},
-  ]
-  forward_to = [pyroscope.write.staging.receiver]
-}
-```
-
-## v0.33
-
-### Symbolic links in Docker containers removed
-
-We've removed the deprecated symbolic links to `/bin/agent*` in Docker
-containers, as planned in v0.31. If you set a custom entrypoint,
-use the new binaries that are prefixed with `/bin/grafana*`.
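-
-For example, a Docker invocation that overrides the entrypoint changes like the following sketch. The image tags and trailing flags are assumptions for illustration:
-
-```shell
-# Before v0.33, the deprecated symbolic link still resolved:
-docker run --entrypoint /bin/agent grafana/agent:v0.32.1 --help
-
-# From v0.33 on, only the grafana- prefixed binary exists:
-docker run --entrypoint /bin/grafana-agent grafana/agent:v0.33.0 --help
-```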
- -## v0.32 - -### Breaking change: `http_client_config` Flow blocks merged with parent blocks - -To reduce the amount of typing required to write Flow components, the arguments -and subblocks found in `http_client_config` have been merged with their parent -blocks: - -- `discovery.docker > http_client_config` is merged into the `discovery.docker` block. -- `discovery.kubernetes > http_client_config` is merged into the `discovery.kubernetes` block. -- `loki.source.kubernetes > client > http_client_config` is merged into the `client` block. -- `loki.source.podlogs > client > http_client_config` is merged into the `client` block. -- `loki.write > endpoint > http_client_config` is merged into the `endpoint` block. -- `mimir.rules.kubernetes > http_client_config` is merged into the `mimir.rules.kubernetes` block. -- `otelcol.receiver.opencensus > grpc` is merged into the `otelcol.receiver.opencensus` block. -- `otelcol.receiver.zipkin > http` is merged into the `otelcol.receiver.zipkin` block. -- `phlare.scrape > http_client_config` is merged into the `phlare.scrape` block. -- `phlare.write > endpoint > http_client_config` is merged into the `endpoint` block. -- `prometheus.remote_write > endpoint > http_client_config` is merged into the `endpoint` block. -- `prometheus.scrape > http_client_config` is merged into the `prometheus.scrape` block. - -Old configuration example: - -```river -prometheus.remote_write "example" { - endpoint { - url = URL - - http_client_config { - basic_auth { - username = BASIC_AUTH_USERNAME - password = BASIC_AUTH_PASSWORD - } - } - } -} -``` - -New configuration example: - -```river -prometheus.remote_write "example" { - endpoint { - url = URL - - basic_auth { - username = BASIC_AUTH_USERNAME - password = BASIC_AUTH_PASSWORD - } - } -} -``` - -### Breaking change: `loki.process` stage blocks combined into new blocks - -Previously, to add a stage to `loki.process`, two blocks were needed: a block -called `stage`, then an inner block for the stage being written. Stage blocks -are now a single block called `stage.STAGENAME`. - -Old configuration example: - -```river -loki.process "example" { - forward_to = RECEIVER_LIST - - stage { - docker {} - } - - stage { - json { - expressions = { output = "log", extra = "" } - } - } -} -``` - -New configuration example: - -```river -loki.process "example" { - forward_to = RECEIVER_LIST - - stage.docker {} - - stage.json { - expressions = { output = "log", extra = "" } - } -} -``` - -### Breaking change: `client_options` block renamed in `remote.s3` component - -To synchronize naming conventions between `remote.s3` and `remote.http`, the -`client_options` block has been renamed `client`. - -Old configuration example: - -```river -remote.s3 "example" { - path = S3_PATH - - client_options { - key = ACCESS_KEY - secret = KEY_SECRET - } -} -``` - -New configuration example: - -```river -remote.s3 "example" { - path = S3_PATH - - client { - key = ACCESS_KEY - secret = KEY_SECRET - } -} -``` - -### Breaking change: `prometheus.integration.node_exporter` component name changed - -The `prometheus.integration.node_exporter` component has been renamed to -`prometheus.exporter.unix`. `unix` was chosen as a name to approximate the -\*nix-like systems the exporter supports. 
-
-Old configuration example:
-
-```river
-prometheus.integration.node_exporter { }
-```
-
-New configuration example:
-
-```river
-prometheus.exporter.unix { }
-```
-
-### Breaking change: support for `EXPERIMENTAL_ENABLE_FLOW` environment variable removed
-
-As first announced in v0.30.0, support for using the `EXPERIMENTAL_ENABLE_FLOW`
-environment variable to enable Flow mode has been removed.
-
-To enable {{< param "PRODUCT_NAME" >}}, set the `AGENT_MODE` environment variable to `flow`.
-
-## v0.31
-
-### Breaking change: binary names are now prefixed with `grafana-`
-
-As first announced in v0.29, the `agent` release binary name is now prefixed
-with `grafana-`:
-
-- `agent` is now `grafana-agent`.
-
-For the `grafana/agent` Docker container, the entrypoint is now
-`/bin/grafana-agent`. A symbolic link from `/bin/agent` to the new binary has
-been added.
-
-The symbolic links will be removed in v0.33, so update custom entrypoints to
-use the new binaries before then.
-
-## v0.30
-
-### Deprecation: `EXPERIMENTAL_ENABLE_FLOW` environment variable changed
-
-As part of graduating {{< param "PRODUCT_NAME" >}} to beta, the
-`EXPERIMENTAL_ENABLE_FLOW` environment variable is replaced by setting
-`AGENT_MODE` to `flow`.
-
-Setting `EXPERIMENTAL_ENABLE_FLOW` to `1` or `true` is now deprecated and
-support for it will be removed in the v0.32 release.
-
-## v0.29
-
-### Deprecation: binary names will be prefixed with `grafana-` in v0.31.0
-
-The binary name `agent` has been deprecated and will be renamed to
-`grafana-agent` in the v0.31.0 release.
-
-As part of this change, the Docker containers for the v0.31.0 release will
-include symbolic links from the old binary names to the new binary names.
-
-There is no action to take at this time.
diff --git a/docs/sources/flow/tasks/_index.md b/docs/sources/flow/tasks/_index.md
deleted file mode 100644
index 4ca62e8c13..0000000000
--- a/docs/sources/flow/tasks/_index.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/getting-started/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/
-- /docs/grafana-cloud/send-data/agent/flow/getting-started/
-- getting_started/ # /docs/agent/latest/flow/getting_started/
-- getting-started/ # /docs/agent/latest/flow/getting-started/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/
-description: How to perform common tasks with Grafana Agent Flow
-menuTitle: Tasks
-title: Tasks with Grafana Agent Flow
-weight: 200
----
-
-# Tasks with {{% param "PRODUCT_NAME" %}}
-
-This section details how to perform common tasks with {{< param "PRODUCT_NAME" >}}.
-
-{{< section >}}
diff --git a/docs/sources/flow/tasks/configure-agent-clustering.md b/docs/sources/flow/tasks/configure-agent-clustering.md
deleted file mode 100644
index d8539914fc..0000000000
--- a/docs/sources/flow/tasks/configure-agent-clustering.md
+++ /dev/null
@@ -1,74 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/configure-agent-clustering/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure-agent-clustering/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure-agent-clustering/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/configure-agent-clustering/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/getting-started/configure-agent-clustering/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/configure-agent-clustering/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/configure-agent-clustering/
-- /docs/grafana-cloud/send-data/agent/flow/getting-started/configure-agent-clustering/
-- ../getting-started/configure-agent-clustering/ # /docs/agent/latest/flow/getting-started/configure-agent-clustering/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure-agent-clustering/
-description: Learn how to configure Grafana Agent clustering in an existing installation
-menuTitle: Configure Grafana Agent clustering
-title: Configure Grafana Agent clustering in an existing installation
-weight: 400
----
-
-# Configure {{% param "PRODUCT_NAME" %}} clustering in an existing installation
-
-You can configure {{< param "PRODUCT_NAME" >}} to run with [clustering][] so that individual {{< param "PRODUCT_ROOT_NAME" >}}s can work together for workload distribution and high availability.
-
-> **Note:** Clustering is a [beta][] feature. Beta features are subject to breaking
-> changes and may be replaced with equivalent functionality that covers the same use case.
-
-This topic describes how to add clustering to an existing installation.
-
-## Configure {{% param "PRODUCT_NAME" %}} clustering with the Helm chart
-
-This section guides you through enabling clustering when {{< param "PRODUCT_NAME" >}} is installed on Kubernetes using the {{< param "PRODUCT_ROOT_NAME" >}} [Helm chart][install-helm].
-
-### Before you begin
-
-- Ensure that your `values.yaml` file has `controller.type` set to `statefulset`.
-
-### Steps
-
-To configure clustering:
-
-1. Amend your existing `values.yaml` file to add `clustering.enabled=true` inside the `agent` block.
-
-   ```yaml
-   agent:
-     clustering:
-       enabled: true
-   ```
-
-1. Upgrade your installation to use the new `values.yaml` file:
-
-   ```bash
-   helm upgrade <RELEASE_NAME> -f values.yaml
-   ```
-
-   Replace the following:
-
-   - _`<RELEASE_NAME>`_: The name of the installation you chose when you installed the Helm chart.
-
-1. Use the {{< param "PRODUCT_NAME" >}} [UI][] to verify the cluster status:
-
-   1. Click **Clustering** in the navigation bar.
-
-   1. Ensure that all expected nodes appear in the resulting table.
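-
-Putting the prerequisite and the new setting together, a complete minimal `values.yaml` for a clustered deployment looks like the following sketch. The `replicas` count is an assumption for illustration:
-
-```yaml
-controller:
-  type: statefulset
-  replicas: 3 # assumed for illustration
-agent:
-  clustering:
-    enabled: true
-```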
- -{{% docs/reference %}} -[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering.md" -[clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering.md" -[beta]: "/docs/agent/ -> /docs/agent//stability.md#beta" -[beta]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/stability.md#beta" -[install-helm]: "/docs/agent/ -> /docs/agent//flow/get-started/install/kubernetes.md" -[install-helm]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/kubernetes.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#component-detail-page" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#component-detail-page" -{{% /docs/reference %}} diff --git a/docs/sources/flow/tasks/configure/_index.md b/docs/sources/flow/tasks/configure/_index.md deleted file mode 100644 index c44ea3dc02..0000000000 --- a/docs/sources/flow/tasks/configure/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/configure/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/configure/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/ -- /docs/grafana-cloud/send-data/agent/flow/setup/configure/ -- ../setup/configure/ # /docs/agent/latest/flow/setup/configure/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/ -description: Configure Grafana Agent Flow after it is installed -menuTitle: Configure -title: Configure Grafana Agent Flow -weight: 90 ---- - -# Configure {{% param "PRODUCT_NAME" %}} - -You can configure {{< param "PRODUCT_NAME" >}} after it is [installed][Install]. -The default River configuration file for {{< param "PRODUCT_NAME" >}} is located at: - -* Linux: `/etc/grafana-agent-flow.river` -* macOS: `$(brew --prefix)/etc/grafana-agent-flow/config.river` -* Windows: `C:\Program Files\Grafana Agent Flow\config.river` - -This section includes information that helps you configure {{< param "PRODUCT_NAME" >}}. 
-
-{{< section >}}
-
-{{% docs/reference %}}
-[Install]: "/docs/agent/ -> /docs/agent//flow/get-started/install/"
-[Install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/"
-{{% /docs/reference %}}
diff --git a/docs/sources/flow/tasks/distribute-prometheus-scrape-load.md b/docs/sources/flow/tasks/distribute-prometheus-scrape-load.md
deleted file mode 100644
index ee3a3fd982..0000000000
--- a/docs/sources/flow/tasks/distribute-prometheus-scrape-load.md
+++ /dev/null
@@ -1,68 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/distribute-prometheus-scrape-load/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/getting-started/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/distribute-prometheus-scrape-load/
-- /docs/grafana-cloud/send-data/agent/flow/getting-started/distribute-prometheus-scrape-load/
-- ../getting-started/distribute-prometheus-scrape-load/ # /docs/agent/latest/flow/getting-started/distribute-prometheus-scrape-load/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/distribute-prometheus-scrape-load/
-description: Learn how to distribute your Prometheus metrics scrape load
-menuTitle: Distribute Prometheus metrics scrape load
-title: Distribute Prometheus metrics scrape load
-weight: 500
----
-
-# Distribute Prometheus metrics scrape load
-
-A good predictor for the size of an {{< param "PRODUCT_NAME" >}} deployment is the number of Prometheus targets each {{< param "PRODUCT_ROOT_NAME" >}} scrapes.
-[Clustering][] with target auto-distribution allows a fleet of {{< param "PRODUCT_ROOT_NAME" >}}s to work together to dynamically distribute their scrape load, providing high availability.
-
-> **Note:** Clustering is a [beta][] feature. Beta features are subject to breaking
-> changes and may be replaced with equivalent functionality that covers the same use case.
-
-## Before you begin
-
-- Familiarize yourself with how to [configure existing {{< param "PRODUCT_NAME" >}} installations][configure-grafana-agent].
-- [Configure Prometheus metrics collection][].
-- [Configure clustering][].
-- Ensure that all of your clustered {{< param "PRODUCT_ROOT_NAME" >}}s have the same configuration file.
-
-## Steps
-
-To distribute Prometheus metrics scrape load with clustering:
-
-1. Add the following block to all `prometheus.scrape` components that should use auto-distribution:
-
-   ```river
-   clustering {
-     enabled = true
-   }
-   ```
-
-1. Restart or reload {{< param "PRODUCT_ROOT_NAME" >}}s for them to use the new configuration.
-
-1. Validate that auto-distribution is functioning:
-
-   1. Using the {{< param "PRODUCT_ROOT_NAME" >}} [UI][] on each {{< param "PRODUCT_ROOT_NAME" >}}, navigate to the details page for one of the `prometheus.scrape` components you modified.
-
-   1. Compare the Debug Info sections between two different {{< param "PRODUCT_ROOT_NAME" >}}s to ensure that they're not scraping the same sets of targets.
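-
-As a sketch, a complete `prometheus.scrape` component with auto-distribution enabled might look like this. The `discovery.kubernetes` source and the `prometheus.remote_write` destination are assumptions for illustration:
-
-```river
-prometheus.scrape "default" {
-  clustering {
-    enabled = true
-  }
-
-  targets    = discovery.kubernetes.pods.targets
-  forward_to = [prometheus.remote_write.default.receiver]
-}
-```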
- -{{% docs/reference %}} -[Clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering.md" -[Clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering.md" -[beta]: "/docs/agent/ -> /docs/agent//stability.md#beta" -[beta]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/stability.md#beta" -[configure-grafana-agent]: "/docs/agent/ -> /docs/agent//flow/tasks/configure" -[configure-grafana-agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure" -[Configure Prometheus metrics collection]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md" -[Configure Prometheus metrics collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/collect-prometheus-metrics.md" -[Configure clustering]: "/docs/agent/ -> /docs/agent//flow/tasks/configure-agent-clustering.md" -[Configure clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure-agent-clustering.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#component-detail-page" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#component-detail-page" -{{% /docs/reference %}} diff --git a/docs/sources/flow/tasks/estimate-resource-usage.md b/docs/sources/flow/tasks/estimate-resource-usage.md deleted file mode 100644 index f3ed1b7aed..0000000000 --- a/docs/sources/flow/tasks/estimate-resource-usage.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -aliases: - - /docs/agent/flow/tasks/estimate-resource-usage/ - - /docs/grafana-cloud/agent/flow/tasks/estimate-resource-usage/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/estimate-resource-usage/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/estimate-resource-usage/ - - /docs/grafana-cloud/send-data/agent/flow/tasks/estimate-resource-usage/ - # Previous page aliases for backwards compatibility: - - /docs/agent/flow/monitoring/resource-usage/ - - /docs/grafana-cloud/agent/flow/monitoring/resource-usage/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/resource-usage/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/resource-usage/ - - /docs/grafana-cloud/send-data/agent/flow/monitoring/resource-usage/ - - ../monitoring/resource-usage/ # /docs/agent/latest/flow/monitoring/resource-usage/ -canonical: https://grafana.com/docs/agent/latest/flow/monitoring/resource-usage/ -description: Estimate expected Grafana Agent resource usage -headless: true -title: Estimate resource usage -menuTitle: Estimate resource usage -weight: 190 ---- - -# Estimate {{% param "PRODUCT_NAME" %}} resource usage - -This page provides guidance for expected resource usage of -{{< param "PRODUCT_NAME" >}} for each telemetry type, based on operational -experience of some of the {{< param "PRODUCT_NAME" >}} maintainers. - -{{< admonition type="note" >}} -The resource usage depends on the workload, hardware, and the configuration used. -The information on this page is a good starting point for most users, but your -actual usage may be different. -{{< /admonition >}} - -## Prometheus metrics - -The Prometheus metrics resource usage depends mainly on the number of active -series that need to be scraped and the scrape interval. 
-
-As a rule of thumb, **for each 1 million active series** and with the default
-scrape interval, you can expect to use approximately:
-
-* 0.4 CPU cores
-* 11 GiB of memory
-* 1.5 MiB/s of total network bandwidth, send and receive
-
-These recommendations are based on deployments that use [clustering][], but they
-will broadly apply to other deployment modes. For more information on how to
-deploy {{< param "PRODUCT_NAME" >}}, see [deploying grafana agent][].
-
-[deploying grafana agent]: {{< relref "../get-started/deploy-agent.md" >}}
-[clustering]: {{< relref "../concepts/clustering.md" >}}
-
-## Loki logs
-
-Loki logs resource usage depends mainly on the volume of logs ingested.
-
-As a rule of thumb, **for each 1 MiB/second of logs ingested**, you can expect
-to use approximately:
-
-* 1 CPU core
-* 120 MiB of memory
-
-These recommendations are based on Kubernetes DaemonSet deployments on clusters
-with a relatively small number of nodes and a high log volume on each. The resource
-usage per 1 MiB/second of logs can be higher if you have a large number of small
-nodes, due to the constant overhead of running {{< param "PRODUCT_NAME" >}} on each node.
-
-Additionally, factors such as the number of labels, the number of files, and the
-average log line length may all play a role in the resource usage.
-
-## Pyroscope profiles
-
-Pyroscope profiles resource usage depends mainly on the volume of profiles.
-
-As a rule of thumb, **for each 100 profiles/second**, you can expect to use
-approximately:
-
-* 1 CPU core
-* 10 GiB of memory
-
-Factors such as the size of each profile and the frequency of fetching them also
-play a role in the overall resource usage.
diff --git a/docs/sources/flow/tasks/migrate/_index.md b/docs/sources/flow/tasks/migrate/_index.md
deleted file mode 100644
index a0c98966dc..0000000000
--- a/docs/sources/flow/tasks/migrate/_index.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/migrate/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/
-description: How to migrate to Grafana Agent Flow
-menuTitle: Migrate
-title: Migrate to Grafana Agent Flow
-weight: 100
----
-
-# How to migrate to {{% param "PRODUCT_NAME" %}}
-
-This section details how to migrate to {{< param "PRODUCT_NAME" >}} from other
-common solutions.
- -{{< section >}} diff --git a/docs/sources/flow/tasks/monitor/_index.md b/docs/sources/flow/tasks/monitor/_index.md deleted file mode 100644 index ac23db2607..0000000000 --- a/docs/sources/flow/tasks/monitor/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/monitor/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/monitor/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/monitor/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/monitor/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/monitoring/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/ -- /docs/grafana-cloud/send-data/agent/flow/monitoring/ -- ../monitoring/ # /docs/agent/latest/flow/monitoring/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/monitor/ -description: Learn about monitoring Grafana Agent Flow -title: Monitor Grafana Agent Flow -menuTitle: Monitor -weight: 110 ---- - -# How to monitor {{% param "PRODUCT_NAME" %}} - -This section details various ways to monitor and debug {{< param "PRODUCT_NAME" >}}. - -{{< section >}} diff --git a/docs/sources/flow/tasks/monitor/component_metrics.md b/docs/sources/flow/tasks/monitor/component_metrics.md deleted file mode 100644 index 5b3693a1f1..0000000000 --- a/docs/sources/flow/tasks/monitor/component_metrics.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/monitor/component_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/monitor/component_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/monitor/component_metrics/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/monitor/component_metrics/ -- component-metrics/ # /docs/agent/latest/flow/tasks/monitor/component-metrics/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/monitoring/component_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/component_metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/component_metrics/ -- /docs/grafana-cloud/send-data/agent/flow/monitoring/component_metrics/ -- ../../monitoring/component-metrics/ # /docs/agent/latest/flow/monitoring/component-metrics/ -- ../../monitoring/component_metrics/ # /docs/agent/latest/flow/monitoring/component_metrics/ -canonical: https://grafana.com/docs/agent/latest/flow/monitoring/component_metrics/ -description: Learn how to monitor component metrics -title: Monitor components -weight: 200 ---- - -# How to monitor components - -{{< param "PRODUCT_NAME" >}} [components][] may optionally expose Prometheus metrics which can be used to investigate the behavior of that component. -These component-specific metrics are only generated when an instance of that component is running. - -> Component-specific metrics are different than any metrics being processed by the component. -> Component-specific metrics are used to expose the state of a component for observability, alerting, and debugging. - -Component-specific metrics are exposed at the `/metrics` HTTP endpoint of the {{< param "PRODUCT_NAME" >}} HTTP server, which defaults to listening on `http://localhost:12345`. - -> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to > modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic. 
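-
-For example, with the default listen address you can inspect these metrics directly:
-
-```shell
-curl http://localhost:12345/metrics
-```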
-
-Component-specific metrics have a `component_id` label matching the component ID generating those metrics.
-For example, component-specific metrics for a `prometheus.remote_write` component labeled `production` will have a `component_id` label with the value `prometheus.remote_write.production`.
-
-The [reference documentation][] for each component describes the list of component-specific metrics that the component exposes.
-Not all components expose metrics.
-
-{{% docs/reference %}}
-[components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md"
-[components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md"
-[grafana-agent run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md"
-[grafana-agent run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md"
-[reference documentation]: "/docs/agent/ -> /docs/agent//flow/reference/components"
-[reference documentation]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components"
-{{% /docs/reference %}}
\ No newline at end of file
diff --git a/docs/sources/flow/tasks/monitor/controller_metrics.md b/docs/sources/flow/tasks/monitor/controller_metrics.md
deleted file mode 100644
index 0ba7617032..0000000000
--- a/docs/sources/flow/tasks/monitor/controller_metrics.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/monitor/controller_metrics/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/monitor/controller_metrics/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/monitor/controller_metrics/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/monitor/controller_metrics/
-- controller-metrics/ # /docs/agent/latest/flow/tasks/monitor/controller-metrics/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/monitoring/controller_metrics/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/controller_metrics/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/controller_metrics/
-- /docs/grafana-cloud/send-data/agent/flow/monitoring/controller_metrics/
-- ../../monitoring/controller-metrics/ # /docs/agent/latest/flow/monitoring/controller-metrics/
-- ../../monitoring/controller_metrics/ # /docs/agent/latest/flow/monitoring/controller_metrics/
-canonical: https://grafana.com/docs/agent/latest/flow/monitoring/controller_metrics/
-description: Learn how to monitor controller metrics
-title: Monitor controller
-weight: 100
----
-
-# How to monitor controller
-
-The {{< param "PRODUCT_NAME" >}} [component controller][] exposes Prometheus metrics which you can use to investigate the controller state.
-
-Metrics for the controller are exposed at the `/metrics` HTTP endpoint of the {{< param "PRODUCT_NAME" >}} HTTP server, which defaults to listening on `http://localhost:12345`.
-
-> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic.
-
-The controller exposes the following metrics:
-
-* `agent_component_controller_evaluating` (Gauge): Set to `1` whenever the component controller is currently evaluating components.
-  This value may be misrepresented depending on how fast evaluations complete or how often evaluations occur.
-* `agent_component_controller_running_components` (Gauge): The current number of running components by health.
- The health is represented in the `health_type` label. -* `agent_component_evaluation_seconds` (Histogram): The time it takes to evaluate components after one of their dependencies is updated. -* `agent_component_dependencies_wait_seconds` (Histogram): Time spent by components waiting to be evaluated after one of their dependencies is updated. -* `agent_component_evaluation_queue_size` (Gauge): The current number of component evaluations waiting to be performed. - -{{% docs/reference %}} -[component controller]: "/docs/agent/ -> /docs/agent//flow/concepts/component_controller.md" -[component controller]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/component_controller.md" -[grafana-agent run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[grafana-agent run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -{{% /docs/reference %}} diff --git a/docs/sources/flow/tutorials/_index.md b/docs/sources/flow/tutorials/_index.md deleted file mode 100644 index d695d7fb13..0000000000 --- a/docs/sources/flow/tutorials/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tutorials/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/ -description: Learn how to use Grafana Agent Flow -title: Tutorials -weight: 300 ---- - -# Tutorials - -This section provides tutorials for learning how to use {{< param "PRODUCT_NAME" >}}. - -{{< section >}} diff --git a/docs/sources/flow/tutorials/flow-by-example/_index.md b/docs/sources/flow/tutorials/flow-by-example/_index.md deleted file mode 100644 index d9b0373502..0000000000 --- a/docs/sources/flow/tutorials/flow-by-example/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/flow-by-example/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/ -description: Learn how to use Grafana Agent Flow -title: Flow by example -weight: 100 ---- - -# Flow by example - -This section provides a set of step-by-step tutorials that show how to use {{< param "PRODUCT_NAME" >}}. - -{{< section >}} diff --git a/docs/sources/flow/tutorials/flow-by-example/get-started.md b/docs/sources/flow/tutorials/flow-by-example/get-started.md deleted file mode 100644 index 5fa1bbd5b5..0000000000 --- a/docs/sources/flow/tutorials/flow-by-example/get-started.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/faq/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/faq/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/faq/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/flow-by-example/faq/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/faq/ -description: Getting started with Flow-by-Example Tutorials -title: Get started -weight: 10 ---- - -## Who is this for? 
-
-This set of tutorials contains a collection of examples that build on each other to demonstrate how to configure and use [{{< param "PRODUCT_NAME" >}}][flow]. It assumes you have a basic understanding of what {{< param "PRODUCT_ROOT_NAME" >}} is and of telemetry collection in general. It also assumes a base level of familiarity with Prometheus and PromQL, Loki and LogQL, and basic Grafana navigation. It assumes no knowledge of {{< param "PRODUCT_NAME" >}} or River concepts.
-
-[flow]: https://grafana.com/docs/agent/latest/flow
-
-## What is Flow?
-
-Flow is a new way to configure {{< param "PRODUCT_NAME" >}}. It is a declarative configuration language that allows you to define a pipeline of telemetry collection, processing, and output. It is built on top of the [River](https://github.com/grafana/river) configuration language, which is designed to be fast, simple, and debuggable.
-
-## What do I need to get started?
-
-You will need a Linux or Unix environment with Docker installed. The examples are designed to be run on a single host so that you can run them on your laptop or in a VM. You are encouraged to follow along with the examples using a `config.river` file and experiment with the examples yourself.
-
-To run the examples, you should have a Grafana Agent binary available. You can follow the instructions on how to [Install Grafana Agent as a Standalone Binary](https://grafana.com/docs/agent/latest/flow/setup/install/binary/#install-grafana-agent-in-flow-mode-as-a-standalone-binary) to get a binary.
-
-## How should I follow along?
-
-You can use the following docker-compose file to set up a local Grafana instance alongside Loki and Prometheus pre-configured as datasources. The examples are designed to be run locally, so you can follow along and experiment with them yourself.
-
-```yaml
-version: '3'
-services:
-  loki:
-    image: grafana/loki:2.9.0
-    ports:
-      - "3100:3100"
-    command: -config.file=/etc/loki/local-config.yaml
-  prometheus:
-    image: prom/prometheus:v2.47.0
-    command:
-      - --web.enable-remote-write-receiver
-      - --config.file=/etc/prometheus/prometheus.yml
-    ports:
-      - "9090:9090"
-  grafana:
-    environment:
-      - GF_PATHS_PROVISIONING=/etc/grafana/provisioning
-      - GF_AUTH_ANONYMOUS_ENABLED=true
-      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
-    entrypoint:
-      - sh
-      - -euc
-      - |
-        mkdir -p /etc/grafana/provisioning/datasources
-        cat <<EOF > /etc/grafana/provisioning/datasources/ds.yaml
-        apiVersion: 1
-        datasources:
-          - name: Loki
-            type: loki
-            access: proxy
-            orgId: 1
-            url: http://loki:3100
-            basicAuth: false
-            isDefault: false
-            version: 1
-            editable: false
-          - name: Prometheus
-            type: prometheus
-            orgId: 1
-            url: http://prometheus:9090
-            basicAuth: false
-            isDefault: true
-            version: 1
-            editable: false
-        EOF
-        /run.sh
-    image: grafana/grafana:latest
-    ports:
-      - "3000:3000"
-```
-
-After running `docker-compose up`, open [http://localhost:3000](http://localhost:3000) in your browser to view the Grafana UI.
-
-The tutorials are designed to be followed in order and generally build on each other. Each example explains what it does and how it works.
-
-The Recommended Reading sections in each tutorial provide a list of documentation topics. To help you understand the concepts used in the example, read the recommended topics in the order given.
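-
-As a starting point, a minimal `config.river` that scrapes {{< param "PRODUCT_NAME" >}}'s own metrics and forwards them to the Prometheus instance from the docker-compose file above might look like the following sketch. The component labels and the self-scrape target are assumptions for illustration:
-
-```river
-prometheus.scrape "self" {
-  targets    = [{"__address__" = "localhost:12345"}]
-  forward_to = [prometheus.remote_write.local.receiver]
-}
-
-prometheus.remote_write "local" {
-  endpoint {
-    url = "http://localhost:9090/api/v1/write"
-  }
-}
-```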
diff --git a/docs/sources/get-started/_index.md b/docs/sources/get-started/_index.md new file mode 100644 index 0000000000..217738d064 --- /dev/null +++ b/docs/sources/get-started/_index.md @@ -0,0 +1,13 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/ +description: Learn how to install and use Grafana Alloy +menuTitle: Get started +title: Get started with Grafana Alloy +weight: 50 +--- + +# Get started with {{% param "PRODUCT_NAME" %}} + +This section covers topics that help you get started with {{< param "PRODUCT_NAME" >}}, including installation, running {{< param "PRODUCT_NAME" >}}, overview of deployment topologies, and more. + +{{< section >}} diff --git a/docs/sources/flow/get-started/deploy-agent.md b/docs/sources/get-started/deploy-alloy.md similarity index 66% rename from docs/sources/flow/get-started/deploy-agent.md rename to docs/sources/get-started/deploy-alloy.md index 0a76e62c42..cd3fcb4e1a 100644 --- a/docs/sources/flow/get-started/deploy-agent.md +++ b/docs/sources/get-started/deploy-alloy.md @@ -1,27 +1,16 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/deploy-agent/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/deploy-agent/ -# Previous docs aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/deploy-agent/ -- /docs/grafana-cloud/send-data/agent/flow/setup/deploy-agent/ -- ../setup/deploy-agent/ # /docs/agent/latest/flow/setup/deploy-agent/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/deploy-agent/ -description: Learn about possible deployment topologies for Grafana Agent Flow +canonical: https://grafana.com/docs/alloy/latest/get-started/deploy-alloy/ +description: Learn about possible deployment topologies for Grafana Alloy menuTitle: Deploy -title: Grafana Agent Flow deployment topologies +title: Grafana Alloy deployment topologies weight: 900 --- -{{< docs/shared source="agent" lookup="/deploy-agent.md" version="" >}} +{{< docs/shared source="alloy" lookup="/deploy-alloy.md" version="" >}} ## Processing different types of telemetry in different {{< param "PRODUCT_ROOT_NAME" >}} instances -If the load on {{< param "PRODUCT_ROOT_NAME" >}} is small, it is recommended to process all necessary telemetry signals in the same {{< param "PRODUCT_ROOT_NAME" >}} process. +If the load on {{< param "PRODUCT_ROOT_NAME" >}} is small, it is recommended to process all necessary telemetry signals in the same {{< param "PRODUCT_ROOT_NAME" >}} process. For example, a single {{< param "PRODUCT_ROOT_NAME" >}} can process all of the incoming metrics, logs, traces, and profiles. However, if the load on the {{< param "PRODUCT_ROOT_NAME" >}}s is big, it may be beneficial to process different telemetry signals in different deployments of {{< param "PRODUCT_ROOT_NAME" >}}s. @@ -30,7 +19,7 @@ This provides better stability due to the isolation between processes. For example, an overloaded {{< param "PRODUCT_ROOT_NAME" >}} processing traces won't impact an {{< param "PRODUCT_ROOT_NAME" >}} processing metrics. 
 Different types of signal collection require different methods for scaling:
 
-* "Pull" components such as `prometheus.scrape` and `pyroscope.scrape` are scaled using hashmod sharing or clustering.
+* "Pull" components such as `prometheus.scrape` and `pyroscope.scrape` are scaled using hashmod sharding or clustering.
 * "Push" components such as `otelcol.receiver.otlp` are scaled by placing a load balancer in front of them.
 
 ### Traces
 
@@ -49,12 +38,10 @@ To decide whether scaling is necessary, check metrics such as:
 
 #### Stateful and stateless components
 
-In the context of tracing, a "stateful component" is a component
-that needs to aggregate certain spans to work correctly.
+In the context of tracing, a "stateful component" is a component that needs to aggregate certain spans to work correctly.
 A "stateless {{< param "PRODUCT_ROOT_NAME" >}}" is a {{< param "PRODUCT_ROOT_NAME" >}} which does not contain stateful components.
 
-Scaling stateful {{< param "PRODUCT_ROOT_NAME" >}}s is more difficult, because spans must be forwarded to a
-specific {{< param "PRODUCT_ROOT_NAME" >}} according to a span property such as trace ID or a `service.name` attribute.
+Scaling stateful {{< param "PRODUCT_ROOT_NAME" >}}s is more difficult, because spans must be forwarded to a specific {{< param "PRODUCT_ROOT_NAME" >}} according to a span property such as trace ID or a `service.name` attribute.
 You can forward spans with `otelcol.exporter.loadbalancing`.
 
 Examples of stateful components:
 
@@ -65,8 +52,8 @@ Examples of stateful components:
 
-A "stateless component" does not need to aggregate specific spans to work correctly -
-it can work correctly even if it only has some of the spans of a trace.
+A "stateless component" doesn't need to aggregate specific spans to work correctly.
+It can work correctly even if it only has some of the spans of a trace.
 
 A stateless {{< param "PRODUCT_ROOT_NAME" >}} can be scaled without using `otelcol.exporter.loadbalancing`.
 For example, you could use an off-the-shelf load balancer to do round-robin load balancing.
diff --git a/docs/sources/get-started/install/_index.md b/docs/sources/get-started/install/_index.md
new file mode 100644
index 0000000000..4ccae7825e
--- /dev/null
+++ b/docs/sources/get-started/install/_index.md
@@ -0,0 +1,31 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/get-started/install/
+description: Learn how to install Grafana Agent Flow
+menuTitle: Install
+title: Install Grafana Agent Flow
+weight: 50
+---
+
+# Install {{% param "PRODUCT_NAME" %}}
+
+You can install {{< param "PRODUCT_NAME" >}} on Docker, Kubernetes, Linux, macOS, or Windows.
+
+The following architectures are supported:
+
+- Linux: AMD64, ARM64
+- Windows: AMD64
+- macOS: AMD64 (Intel), ARM64 (Apple Silicon)
+- FreeBSD: AMD64
+
+{{< admonition type="note" >}}
+Installing {{< param "PRODUCT_NAME" >}} on other operating systems is possible, but isn't recommended or supported.
+{{< /admonition >}}
+
+{{< section >}}
+
+## Data collection
+
+By default, {{< param "PRODUCT_NAME" >}} sends anonymous usage information to Grafana Labs.
+Refer to [data collection][] for more information about what data is collected and how you can opt out.
+[data collection]: ../../../data-collection/
diff --git a/docs/sources/flow/get-started/install/ansible.md b/docs/sources/get-started/install/ansible.md
similarity index 70%
rename from docs/sources/flow/get-started/install/ansible.md
rename to docs/sources/get-started/install/ansible.md
index 837f5553eb..bbd8209f89 100644
--- a/docs/sources/flow/get-started/install/ansible.md
+++ b/docs/sources/get-started/install/ansible.md
@@ -1,13 +1,8 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/get-started/install/ansible/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/ansible/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/ansible/
-- /docs/grafana-cloud/send-data/agent/flow/get-started/install/ansible/
-canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/ansible/
-description: Learn how to install Grafana Agent Flow with Ansible
+canonical: https://grafana.com/docs/alloy/latest/get-started/install/ansible/
+description: Learn how to install Grafana Alloy with Ansible
 menuTitle: Ansible
-title: Install Grafana Agent Flow with Ansible
+title: Install Grafana Alloy with Ansible
 weight: 550
 ---
 
@@ -17,7 +12,7 @@ You can use Ansible to install and manage {{< param "PRODUCT_NAME" >}} on Linux
 
 ## Before you begin
 
-- These steps assume you already have a working [Ansible](https://www.ansible.com/) setup and a pre-existing inventory.
+- These steps assume you already have a working [Ansible][] setup and a pre-existing inventory.
 - You can add the tasks below to any new or existing role.
 
 ## Steps
 
@@ -45,7 +40,6 @@ To add {{% param "PRODUCT_NAME" %}} to a host:
    ```
 
    Replace the following:
-
    - _``_: The path to the River configuration file on the Ansible Controller (Localhost).
 
 1. Run the Ansible playbook. Open a terminal window and run the following command from the Ansible playbook directory.
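+
+   For example, assuming a playbook file named `deploy-agent.yml` (the file name is an assumption for illustration):
+
+   ```shell
+   ansible-playbook deploy-agent.yml
+   ```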
@@ -80,7 +74,5 @@ Main PID: 3176 (agent-linux-amd) - [Configure {{< param "PRODUCT_NAME" >}}][Configure] -{{% docs/reference %}} -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" -{{% /docs/reference %}} +[Ansible]: https://www.ansible.com/ +[Configure]: ../../../tasks/configure/configure-linux/ diff --git a/docs/sources/flow/get-started/install/binary.md b/docs/sources/get-started/install/binary.md similarity index 50% rename from docs/sources/flow/get-started/install/binary.md rename to docs/sources/get-started/install/binary.md index fa304df0ac..d58d142742 100644 --- a/docs/sources/flow/get-started/install/binary.md +++ b/docs/sources/get-started/install/binary.md @@ -1,17 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/binary/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/binary/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/binary/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/binary/ -# Previous docs aliases for backwards compatibility: -- ../../install/binary/ # /docs/agent/latest/flow/install/binary/ -- /docs/grafana-cloud/agent/flow/setup/install/binary/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/binary/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/binary/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/binary/ -- ../../setup/install/binary/ # /docs/agent/latest/flow/setup/install/binary/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/binary/ +canonical: https://grafana.com/docs/alloy/latest/get-started/install/binary/ description: Learn how to install Grafana Agent Flow as a standalone binary menuTitle: Standalone title: Install Grafana Agent Flow as a standalone binary @@ -31,7 +19,7 @@ weight: 600 To download {{< param "PRODUCT_NAME" >}} as a standalone binary, perform the following steps. -1. Navigate to the current {{< param "PRODUCT_ROOT_NAME" >}} [release](https://github.com/grafana/agent/releases) page. +1. Navigate to the current {{< param "PRODUCT_ROOT_NAME" >}} [release][] page. 1. Scroll down to the **Assets** section. @@ -46,14 +34,11 @@ To download {{< param "PRODUCT_NAME" >}} as a standalone binary, perform the fol ``` Replace the following: - - _``_: The path to the extracted binary. 
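+
+   For example, if the binary was extracted into the current directory as `grafana-agent-flow` (the path is an assumption for illustration), make it executable with:
+
+   ```shell
+   chmod +x ./grafana-agent-flow
+   ```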
## Next steps - [Run {{< param "PRODUCT_NAME" >}}][Run] -{{% docs/reference %}} -[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/binary.md" -[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/binary.md" -{{% /docs/reference %}} +[release]: https://github.com/grafana/alloy/releases +[Run]: ../../run/binary/ diff --git a/docs/sources/flow/get-started/install/chef.md b/docs/sources/get-started/install/chef.md similarity index 76% rename from docs/sources/flow/get-started/install/chef.md rename to docs/sources/get-started/install/chef.md index ef348384a5..1f17d1c569 100644 --- a/docs/sources/flow/get-started/install/chef.md +++ b/docs/sources/get-started/install/chef.md @@ -1,14 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/chef/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/chef/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/chef/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/chef/ - -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/chef/ -description: Learn how to install Grafana Agent Flow with Chef +canonical: https://grafana.com/docs/alloy/latest/get-started/install/chef/ +description: Learn how to install Grafana Alloy with Chef menuTitle: Chef -title: Install Grafana Agent Flow with Chef +title: Install Grafana Alloy with Chef weight: 550 --- @@ -20,7 +14,8 @@ You can use Chef to install and manage {{< param "PRODUCT_NAME" >}}. - These steps assume you already have a working [Chef][] setup. - You can add the following resources to any new or existing recipe. -- These tasks install {{< param "PRODUCT_NAME" >}} from the package repositories. The tasks target Linux systems from the following families: +- These tasks install {{< param "PRODUCT_NAME" >}} from the package repositories. + The tasks target Linux systems from the following families: - Debian (including Ubuntu) - RedHat Enterprise Linux - Amazon Linux @@ -97,8 +92,4 @@ The default configuration file location is `/etc/grafana-agent-flow.river`. 
You - [Configure {{< param "PRODUCT_NAME" >}}][Configure] [Chef]: https://www.chef.io/products/chef-infrastructure-management/ - -{{% docs/reference %}} -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" -{{% /docs/reference %}} +[Configure]: ../../../tasks/configure/configure-linux/ diff --git a/docs/sources/flow/get-started/install/docker.md b/docs/sources/get-started/install/docker.md similarity index 66% rename from docs/sources/flow/get-started/install/docker.md rename to docs/sources/get-started/install/docker.md index c7e07b1b3b..8db56e706b 100644 --- a/docs/sources/flow/get-started/install/docker.md +++ b/docs/sources/get-started/install/docker.md @@ -1,20 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/docker/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/docker/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/docker/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/docker/ -# Previous docs aliases for backwards compatibility: -- ../../install/docker/ # /docs/agent/latest/flow/install/docker/ -- /docs/grafana-cloud/agent/flow/setup/install/docker/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/docker/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/docker/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/docker/ -- ../../setup/install/docker/ # /docs/agent/latest/flow/setup/install/docker/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/docker/ -description: Learn how to install Grafana Agent Flow on Docker +canonical: https://grafana.com/docs/alloy/latest/get-started/install/docker/ +description: Learn how to install Grafana Alloy on Docker menuTitle: Docker -title: Run Grafana Agent Flow in a Docker container +title: Run Grafana Alloy in a Docker container weight: 100 --- @@ -94,10 +82,5 @@ To verify that {{< param "PRODUCT_NAME" >}} is running successfully, navigate to [Linux containers]: #run-a-linux-docker-container [Windows containers]: #run-a-windows-docker-container [Docker]: https://docker.io - -{{% docs/reference %}} -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui" -{{% /docs/reference %}} +[run]: ../../../reference/cli/run/ +[UI]: ../../../tasks/debug/#grafana-agent-flow-ui diff --git a/docs/sources/get-started/install/kubernetes.md b/docs/sources/get-started/install/kubernetes.md new file mode 100644 index 0000000000..68f93fb150 --- /dev/null +++ b/docs/sources/get-started/install/kubernetes.md @@ -0,0 +1,53 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/install/kubernetes/ +description: Learn how to deploy Grafana Alloy on Kubernetes +menuTitle: Kubernetes +title: Deploy Grafana Alloy on Kubernetes +weight: 200 +--- + +# Deploy {{% param "PRODUCT_NAME" %}} on Kubernetes + +{{< param "PRODUCT_NAME" >}} can be deployed on Kubernetes by using the Helm chart for {{< param "PRODUCT_ROOT_NAME" >}}. + +## Before you begin + +* Install [Helm][] on your computer.
+* Configure a Kubernetes cluster that you can use for {{< param "PRODUCT_NAME" >}}. +* Configure your local Kubernetes context to point at the cluster. + +## Deploy + +To deploy {{< param "PRODUCT_ROOT_NAME" >}} on Kubernetes using Helm, run the following commands in a terminal window: + +1. Add the Grafana Helm chart repository: + + ```shell + helm repo add grafana https://grafana.github.io/helm-charts + ``` + +1. Update the Grafana Helm chart repository: + + ```shell + helm repo update + ``` + +1. Install {{< param "PRODUCT_ROOT_NAME" >}}: + + ```shell + helm install <RELEASE_NAME> grafana/grafana-agent + ``` + + Replace the following: + + - _`<RELEASE_NAME>`_: The name to use for your {{< param "PRODUCT_ROOT_NAME" >}} installation, such as `grafana-agent-flow`. + +For more information on the {{< param "PRODUCT_ROOT_NAME" >}} Helm chart, refer to the Helm chart documentation on [Artifact Hub][]. + +## Next steps + +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] + +[Helm]: https://helm.sh +[Artifact Hub]: https://artifacthub.io/packages/helm/grafana/grafana-agent +[Configure]: ../../../tasks/configure/configure-kubernetes/ diff --git a/docs/sources/flow/get-started/install/linux.md b/docs/sources/get-started/install/linux.md similarity index 65% rename from docs/sources/flow/get-started/install/linux.md rename to docs/sources/get-started/install/linux.md index 2241aeb78d..a2ab220a67 100644 --- a/docs/sources/flow/get-started/install/linux.md +++ b/docs/sources/get-started/install/linux.md @@ -1,20 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/linux/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/linux/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/linux/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/linux/ -# Previous docs aliases for backwards compatibility: -- ../../install/linux/ # /docs/agent/latest/flow/install/linux/ -- /docs/grafana-cloud/agent/flow/setup/install/linux/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/linux/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/linux/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/linux/ -- ../../setup/install/linux/ # /docs/agent/latest/flow/setup/install/linux/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/linux/ -description: Learn how to install Grafana Agent Flow on Linux +canonical: https://grafana.com/docs/alloy/latest/get-started/install/linux/ +description: Learn how to install Grafana Alloy on Linux menuTitle: Linux -title: Install Grafana Agent Flow on Linux +title: Install Grafana Alloy on Linux weight: 300 --- @@ -128,9 +116,5 @@ To uninstall {{< param "PRODUCT_NAME" >}} on Linux, run the following commands i - [Run {{< param "PRODUCT_NAME" >}}][Run] - [Configure {{< param "PRODUCT_NAME" >}}][Configure] -{{% docs/reference %}} -[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/linux.md" -[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/linux.md" -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" -{{% /docs/reference %}} +[Run]: ../../run/linux/ +[Configure]: ../../../tasks/configure/configure-linux/ diff --git a/docs/sources/get-started/install/macos.md b/docs/sources/get-started/install/macos.md new file mode 100644 index
0000000000..f151f0fd6a --- /dev/null +++ b/docs/sources/get-started/install/macos.md @@ -0,0 +1,70 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/install/macos/ +description: Learn how to install Grafana Alloy on macOS +menuTitle: macOS +title: Install Grafana Alloy on macOS +weight: 400 +--- + +# Install {{% param "PRODUCT_NAME" %}} on macOS + +You can install {{< param "PRODUCT_NAME" >}} on macOS with Homebrew. + +{{< admonition type="note" >}} +The default prefix for Homebrew on Intel is `/usr/local`. +The default prefix for Homebrew on Apple Silicon is `/opt/homebrew`. +To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`. +{{< /admonition >}} + +## Before you begin + +* Install [Homebrew][] on your computer. + +## Install + +To install {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in a terminal window. + +1. Add the Grafana Homebrew tap: + + ```shell + brew tap grafana/grafana + ``` + +1. Install {{< param "PRODUCT_NAME" >}}: + + ```shell + brew install grafana-agent-flow + ``` + +## Upgrade + +To upgrade {{< param "PRODUCT_NAME" >}} on macOS, run the following commands in a terminal window. + +1. Upgrade {{< param "PRODUCT_NAME" >}}: + + ```shell + brew upgrade grafana-agent-flow + ``` + +1. Restart {{< param "PRODUCT_NAME" >}}: + + ```shell + brew services restart grafana-agent-flow + ``` + +## Uninstall + +To uninstall {{< param "PRODUCT_NAME" >}} on macOS, run the following command in a terminal window: + +```shell +brew uninstall grafana-agent-flow +``` + +## Next steps + +- [Run {{< param "PRODUCT_NAME" >}}][Run] +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] + +[Homebrew]: https://brew.sh +[Run]: ../../run/macos/ +[Configure]: ../../../tasks/configure/configure-macos/ diff --git a/docs/sources/flow/get-started/install/puppet.md b/docs/sources/get-started/install/puppet.md similarity index 72% rename from docs/sources/flow/get-started/install/puppet.md rename to docs/sources/get-started/install/puppet.md index db3fb2b488..021221ab45 100644 --- a/docs/sources/flow/get-started/install/puppet.md +++ b/docs/sources/get-started/install/puppet.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/puppet/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/puppet/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/puppet/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/puppet/ - -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/puppet/ +canonical: https://grafana.com/docs/alloy/latest/get-started/install/puppet/ description: Learn how to install Grafana Agent Flow with Puppet menuTitle: Puppet title: Install Grafana Agent Flow with Puppet @@ -41,7 +35,7 @@ To add {{< param "PRODUCT_NAME" >}} to a host: } ``` -1. Create a new [Puppet][] manifest with the following class to add the Grafana package repositories, install the `grafana-agent-flow` package, and run the service: +1. Create a new [Puppet][] manifest with the following class to add the Grafana package repositories, install the `grafana-alloy` package, and run the service: ```ruby class grafana_agent::grafana_agent_flow () { @@ -97,17 +91,14 @@ ## Configuration -The `grafana-agent-flow` package installs a default configuration file that doesn't send telemetry anywhere.
+The `grafana-alloy` package installs a default configuration file that doesn't send telemetry anywhere. -The default configuration file location is `/etc/grafana-agent-flow.river`. You can replace this file with your own configuration, or create a new configuration file for the service to use. +The default configuration file location is `/etc/grafana-alloy.river`. +You can replace this file with your own configuration, or create a new configuration file for the service to use. ## Next steps - [Configure {{< param "PRODUCT_NAME" >}}][Configure] [Puppet]: https://www.puppet.com/ - -{{% docs/reference %}} -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" -{{% /docs/reference %}} +[Configure]: ../../../tasks/configure/configure-linux/ diff --git a/docs/sources/flow/get-started/install/windows.md b/docs/sources/get-started/install/windows.md similarity index 57% rename from docs/sources/flow/get-started/install/windows.md rename to docs/sources/get-started/install/windows.md index a20ed34497..ba827e3c46 100644 --- a/docs/sources/flow/get-started/install/windows.md +++ b/docs/sources/get-started/install/windows.md @@ -1,20 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/get-started/install/windows/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/windows/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/windows/ -- /docs/grafana-cloud/send-data/agent/flow/get-started/install/windows/ -# Previous docs aliases for backwards compatibility: -- ../../install/windows/ # /docs/agent/latest/flow/install/windows/ -- /docs/grafana-cloud/agent/flow/setup/install/windows/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/windows/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/windows/ -- /docs/grafana-cloud/send-data/agent/flow/setup/install/windows/ -- ../../setup/install/windows/ # /docs/agent/latest/flow/setup/install/windows/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/windows/ -description: Learn how to install Grafana Agent Flow on Windows +canonical: https://grafana.com/docs/alloy/latest/get-started/install/windows/ +description: Learn how to install Grafana Alloy on Windows menuTitle: Windows -title: Install Grafana Agent Flow on Windows +title: Install Grafana Alloy on Windows weight: 500 --- @@ -76,7 +64,7 @@ To do a silent install of {{< param "PRODUCT_NAME" >}} on Windows, perform the f ## Uninstall -You can uninstall {{< param "PRODUCT_NAME" >}} with Windows Remove Programs or `C:\Program Files\Grafana Agent\uninstaller.exe`. +You can uninstall {{< param "PRODUCT_NAME" >}} with Windows Remove Programs or `C:\Program Files\Grafana Alloy\uninstaller.exe`. Uninstalling {{< param "PRODUCT_NAME" >}} stops the service and removes it from disk. This includes any configuration files in the installation directory. @@ -84,16 +72,10 @@ This includes any configuration files in the installation directory. 
## Next steps -- [Run {{< param "PRODUCT_NAME" >}}][Start] +- [Run {{< param "PRODUCT_NAME" >}}][Run] - [Configure {{< param "PRODUCT_NAME" >}}][Configure] -[latest]: https://github.com/grafana/agent/releases/latest - -{{% docs/reference %}} -[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/windows.md" -[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/windows.md" -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-windows.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-windows.md" -[data collection]: "/docs/agent/ -> /docs/agent//data-collection.md" -[data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/data-collection.md" -{{% /docs/reference %}} +[latest]: https://github.com/grafana/alloy/releases/latest +[data collection]: ../../../data-collection/ +[Run]: ../../run/windows/ +[Configure]: ../../../tasks/configure/configure-windows/ diff --git a/docs/sources/get-started/run/_index.md b/docs/sources/get-started/run/_index.md new file mode 100644 index 0000000000..90dbc4192d --- /dev/null +++ b/docs/sources/get-started/run/_index.md @@ -0,0 +1,16 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/run/ +description: Learn how to run Grafana Alloy +menuTitle: Run +title: Run Grafana Alloy +weight: 50 +--- + +# Run {{% param "PRODUCT_NAME" %}} + +Use the following pages to learn how to start, restart, and stop {{< param "PRODUCT_NAME" >}} after it's installed. +For installation instructions, refer to [Install {{< param "PRODUCT_NAME" >}}][Install]. + +{{< section >}} + +[Install]: ../install/ diff --git a/docs/sources/flow/get-started/run/binary.md b/docs/sources/get-started/run/binary.md similarity index 78% rename from docs/sources/flow/get-started/run/binary.md rename to docs/sources/get-started/run/binary.md index 0b9ac5b7d7..8000ec6786 --- a/docs/sources/flow/get-started/run/binary.md +++ b/docs/sources/get-started/run/binary.md @@ -1,13 +1,8 @@ --- -aliases: - - /docs/grafana-cloud/agent/flow/get-started/run/binary/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/binary/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/binary/ - - /docs/grafana-cloud/send-data/agent/flow/get-started/run/binary/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/binary/ -description: Learn how to run Grafana Agent Flow as a standalone binary +canonical: https://grafana.com/docs/alloy/latest/get-started/run/binary/ +description: Learn how to run Grafana Alloy as a standalone binary menuTitle: Standalone -title: Run Grafana Agent Flow as a standalone binary +title: Run Grafana Alloy as a standalone binary weight: 600 --- @@ -118,9 +113,5 @@ These steps assume you have a default systemd and {{< param "PRODUCT_NAME" >}} c 1. Use the [Linux][StartLinux] systemd commands to manage your standalone Linux installation of {{< param "PRODUCT_NAME" >}}.
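As a sketch of what such a systemd setup can look like, assuming the binary sits at `/usr/local/bin/grafana-agent-flow` and reads the default configuration path (both are assumptions; substitute the values from your installation):

```shell
# Minimal sketch; unit name, binary path, and configuration path are assumptions.
sudo tee /etc/systemd/system/grafana-agent-flow.service > /dev/null <<'EOF'
[Unit]
Description=Grafana Agent Flow (standalone binary)
After=network-online.target

[Service]
ExecStart=/usr/local/bin/grafana-agent-flow run /etc/grafana-agent-flow.river
Restart=always

[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable --now grafana-agent-flow.service
```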
-{{% docs/reference %}} -[InstallBinary]: "/docs/agent/ -> /docs/agent//flow/get-started/install/binary.md" -[InstallBinary]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/install/binary.md" -[StartLinux]: "/docs/agent/ -> /docs/agent//flow/get-started/run/linux.md" -[StartLinux]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/run/linux.md" -{{% /docs/reference %}} +[InstallBinary]: ../../install/binary/ +[StartLinux]: ../linux/ diff --git a/docs/sources/flow/get-started/run/linux.md b/docs/sources/get-started/run/linux.md similarity index 63% rename from docs/sources/flow/get-started/run/linux.md rename to docs/sources/get-started/run/linux.md index 1085aaabdf..0fb7873d69 100644 --- a/docs/sources/flow/get-started/run/linux.md +++ b/docs/sources/get-started/run/linux.md @@ -1,10 +1,5 @@ --- -aliases: - - /docs/grafana-cloud/agent/flow/get-started/run/linux/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/linux/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/linux/ - - /docs/grafana-cloud/send-data/agent/flow/get-started/run/linux/ -canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/linux/ +canonical: https://grafana.com/docs/alloy/latest/get-started/run/linux/ description: Learn how to run Grafana Agent Flow on Linux menuTitle: Linux title: Run Grafana Agent Flow on Linux @@ -15,8 +10,6 @@ weight: 300 {{< param "PRODUCT_NAME" >}} is [installed][InstallLinux] as a [systemd][] service on Linux. -[systemd]: https://systemd.io/ - ## Start {{% param "PRODUCT_NAME" %}} To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: @@ -67,9 +60,6 @@ sudo journalctl -u grafana-agent-flow - [Configure {{< param "PRODUCT_NAME" >}}][Configure] -{{% docs/reference %}} -[InstallLinux]: "/docs/agent/ -> /docs/agent//flow/get-started/install/linux.md" -[InstallLinux]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/install/linux.md" -[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" -{{% /docs/reference %}} +[InstallLinux]: ../../install/linux/ +[systemd]: https://systemd.io/ +[Configure]: ../../../tasks/configure/configure-linux/ diff --git a/docs/sources/get-started/run/macos.md b/docs/sources/get-started/run/macos.md new file mode 100644 index 0000000000..df3ef5537c --- /dev/null +++ b/docs/sources/get-started/run/macos.md @@ -0,0 +1,57 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/run/macos/ +description: Learn how to run Grafana Alloy on macOS +menuTitle: macOS +title: Run Grafana Alloy on macOS +weight: 400 +--- + +# Run {{% param "PRODUCT_NAME" %}} on macOS + +{{< param "PRODUCT_NAME" >}} is [installed][InstallMacOS] as a launchd service on macOS. + +## Start {{% param "PRODUCT_NAME" %}} + +To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +brew services start grafana-agent-flow +``` + +{{< param "PRODUCT_NAME" >}} automatically runs when the system starts. 
+ +(Optional) To verify that the service is running, run the following command in a terminal window: + +```shell +brew services info grafana-agent-flow +``` + +## Restart {{% param "PRODUCT_NAME" %}} + +To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +brew services restart grafana-agent-flow +``` + +## Stop {{% param "PRODUCT_NAME" %}} + +To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +brew services stop grafana-agent-flow +``` + +## View {{% param "PRODUCT_NAME" %}} logs on macOS + +By default, logs are written to `$(brew --prefix)/var/log/grafana-agent-flow.log` and `$(brew --prefix)/var/log/grafana-agent-flow.err.log`. + +If you followed [Configure the {{< param "PRODUCT_NAME" >}} service][ConfigureService] and changed the path where logs are written, refer to your current copy of the {{< param "PRODUCT_NAME" >}} formula to locate your log files. + +## Next steps + +- [Configure {{< param "PRODUCT_NAME" >}}][ConfigureMacOS] + +[InstallMacOS]: ../../install/macos/ +[ConfigureMacOS]: ../../../tasks/configure/configure-macos/ +[ConfigureService]: ../../../tasks/configure/configure-macos/#configure-the-grafana-alloy-service diff --git a/docs/sources/get-started/run/windows.md b/docs/sources/get-started/run/windows.md new file mode 100644 index 0000000000..1943d3fe28 --- /dev/null +++ b/docs/sources/get-started/run/windows.md @@ -0,0 +1,45 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/get-started/run/windows/ +description: Learn how to run Grafana Alloy on Windows +menuTitle: Windows +title: Run Grafana Alloy on Windows +weight: 500 +--- + +# Run {{% param "PRODUCT_NAME" %}} on Windows + +{{< param "PRODUCT_NAME" >}} is [installed][InstallWindows] as a Windows Service. +The service is configured to run automatically on startup. + +To verify that {{< param "PRODUCT_NAME" >}} is running as a Windows Service: + +1. Open the Windows Services manager (`services.msc`): + + 1. Right-click the Start Menu and select **Run**. + + 1. Type `services.msc` and click **OK**. + +1. Scroll down to find the **{{< param "PRODUCT_NAME" >}}** service and verify that the **Status** is **Running**. + +## View {{% param "PRODUCT_NAME" %}} logs + +When running on Windows, {{< param "PRODUCT_NAME" >}} writes its logs to Windows Event Logs with an event source name of **{{< param "PRODUCT_NAME" >}}**. + +To view the logs, perform the following steps: + +1. Open the Event Viewer: + + 1. Right-click the Start Menu and select **Run**. + + 1. Type `eventvwr` and click **OK**. + +1. In the Event Viewer, click **Windows Logs > Application**. + +1. Search for events with the source **{{< param "PRODUCT_NAME" >}}**.
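As a command-line alternative to Event Viewer, a query along the following lines can work from an elevated prompt. The provider name is an assumption based on the product name, so confirm the exact event source in Event Viewer first:

```shell
# Hypothetical provider name; confirm the event source in Event Viewer.
wevtutil qe Application /c:20 /rd:true /f:text /q:"*[System[Provider[@Name='Grafana Agent Flow']]]"
```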
+ +## Next steps + +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] + +[InstallWindows]: ../../install/windows/ +[Configure]: ../../../tasks/configure/configure-windows/ diff --git a/docs/sources/operator/_index.md b/docs/sources/operator/_index.md deleted file mode 100644 index a39241c87a..0000000000 --- a/docs/sources/operator/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/operator/ -- /docs/grafana-cloud/monitor-infrastructure/agent/operator/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/ -- /docs/grafana-cloud/send-data/agent/operator/ -canonical: https://grafana.com/docs/agent/latest/operator/ -description: Learn about the static mode Kubernetes operator -menuTitle: Static mode Kubernetes operator -title: Static mode Kubernetes operator (Beta) -weight: 300 ---- - -# Static mode Kubernetes operator (Beta) - -Grafana Agent Operator is a [Kubernetes operator][] for the [static mode][] of -Grafana Agent. It makes it easier to deploy and configure static mode to -collect telemetry data from Kubernetes resources. - -Grafana Agent Operator supports consuming various [custom resources][] for -telemetry collection: - -* Prometheus Operator [ServiceMonitor][] resources for collecting metrics from Kubernetes [Services][]. -* Prometheus Operator [PodMonitor][] resources for collecting metrics from Kubernetes [Pods][]. -* Prometheus Operator [Probe][] resources for collecting metrics from Kubernetes [Ingresses][]. -* Custom [PodLogs][] resources for collecting logs. - -{{< admonition type="note" >}} -Grafana Agent Operator does not collect traces. -{{< /admonition >}} - -Grafana Agent Operator is currently in [Beta][], and is subject to change or -being removed with functionality which covers the same use case. - -{{< admonition type="note" >}} -If you are shipping your data to Grafana Cloud, use [Kubernetes Monitoring](/docs/grafana-cloud/kubernetes-monitoring/) to set up Agent Operator. -Kubernetes Monitoring provides a simplified approach and preconfigured dashboards and alerts. -{{< /admonition >}} - -Grafana Agent Operator uses additional custom resources to manage the deployment -and configuration of Grafana Agents running in static mode. In addition to the -supported custom resources, you can also provide your own Service Discovery -(SD) configurations to collect metrics from other types of sources. - -Grafana Agent Operator is particularly useful for Helm users, where manually -writing generic service discovery to match all of your chart installations can -be difficult, or where manually writing a specific SD for each chart -installation can be tedious. - -The following sections describe how to use Grafana Agent Operator: - -| Topic | Describes | -|---|---| -| [Configure Kubernetes Monitoring using Agent Operator](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/configure-infrastructure-manually/k8s-agent-operator/) | Use the Kubernetes Monitoring solution to set up monitoring of your Kubernetes cluster and to install preconfigured dashboards and alerts. | -| [Install Grafana Agent Operator with Helm]({{< relref "./helm-getting-started" >}}) | How to deploy the Grafana Agent Operator into your Kubernetes cluster using the grafana-agent-operator Helm chart. | -| [Install Grafana Agent Operator]({{< relref "./getting-started" >}}) | How to deploy the Grafana Agent Operator into your Kubernetes cluster without using Helm. 
| -| [Deploy the Grafana Agent Operator resources]({{< relref "./deploy-agent-operator-resources" >}}) | How to roll out the Grafana Agent Operator custom resources, needed to begin monitoring your cluster. Complete this procedure *after* installing Grafana Agent Operator—either with or without Helm. | -| [Grafana Agent Operator architecture]({{< relref "./architecture" >}}) | Learn about the resources used by Agent Operator to collect telemetry data and how it discovers the hierarchy of custom resources, continually reconciling the hierarchy. | -| [Set up Agent Operator integrations]({{< relref "./operator-integrations" >}}) | Learn how to set up node-exporter and mysqld-exporter integrations. | - -[Kubernetes operator]: https://www.cncf.io/blog/2022/06/15/kubernetes-operators-what-are-they-some-examples/ -[static mode]: {{< relref "../static/" >}} -[Services]: https://kubernetes.io/docs/concepts/services-networking/service/ -[Pods]: https://kubernetes.io/docs/concepts/workloads/pods/ -[Ingresses]: https://kubernetes.io/docs/concepts/services-networking/ingress/ -[custom resources]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/ -[Beta]: {{< relref "../stability.md#beta" >}} -[ServiceMonitor]: https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ServiceMonitor -[PodMonitor]: https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor -[Probe]: https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.Probe -[PodLogs]: {{< relref "./api.md#podlogs-a-namemonitoringgrafanacomv1alpha1podlogsa">}} -[Prometheus Operator]: https://github.com/prometheus-operator/prometheus-operator diff --git a/docs/sources/operator/add-custom-scrape-jobs.md b/docs/sources/operator/add-custom-scrape-jobs.md deleted file mode 100644 index 6f4fb9cc02..0000000000 --- a/docs/sources/operator/add-custom-scrape-jobs.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/operator/add-custom-scrape-jobs/ -- /docs/grafana-cloud/monitor-infrastructure/agent/operator/add-custom-scrape-jobs/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/add-custom-scrape-jobs/ -- /docs/grafana-cloud/send-data/agent/operator/add-custom-scrape-jobs/ -canonical: https://grafana.com/docs/agent/latest/operator/add-custom-scrape-jobs/ -description: Learn how to add custom scrape jobs -title: Add custom scrape jobs -weight: 400 ---- - -# Add custom scrape jobs - -Sometimes you want to add a scrape job for something that isn't supported by the -standard set of Prometheus Operator CRDs. A common example of this is node-level -metrics. - -To do this, you'll need to write custom scrape configs and store it in a -Kubernetes Secret: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: extra-jobs - namespace: operator -stringData: - jobs.yaml: | - -``` - -Replace `` above with the array of Prometheus scrape jobs to -include. 
- -For example, to collect metrics from Kubelet and cAdvisor, use the following: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: extra-jobs - namespace: operator -stringData: - jobs.yaml: | - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: integrations/kubernetes/kubelet - kubernetes_sd_configs: - - role: node - relabel_configs: - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - source_labels: [__meta_kubernetes_node_name] - replacement: /api/v1/nodes/$1/proxy/metrics - target_label: __metrics_path__ - - action: hashmod - modulus: $(SHARDS) - source_labels: - - __address__ - target_label: __tmp_hash - - action: keep - regex: $(SHARD) - source_labels: - - __tmp_hash - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - job_name: integrations/kubernetes/cadvisor - kubernetes_sd_configs: - - role: node - relabel_configs: - - replacement: kubernetes.default.svc:443 - target_label: __address__ - - regex: (.+) - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - source_labels: - - __meta_kubernetes_node_name - target_label: __metrics_path__ - - action: hashmod - modulus: $(SHARDS) - source_labels: - - __address__ - target_label: __tmp_hash - - action: keep - regex: $(SHARD) - source_labels: - - __tmp_hash - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -``` - -Note that you **should** always add these two relabel_configs for each custom job: - -```yaml -- action: hashmod - modulus: $(SHARDS) - source_labels: - - __address__ - target_label: __tmp_hash -- action: keep - regex: $(SHARD) - source_labels: - - __tmp_hash -``` - -These rules ensure if your GrafanaAgent has multiple metrics shards, only one -pod per replica will collect metrics for each job. - -Once your Secret is defined, you'll then need to add a `additionalScrapeConfigs` -field to your MetricsInstance: - -```yaml -apiVersion: monitoring.grafana.com/v1alpha1 -kind: MetricsInstance -metadata: - labels: - name: grafana-agent - name: primary - namespace: operator -spec: - additionalScrapeConfigs: - name: extra-jobs - key: jobs.yaml - # ... Other settings ... -``` - -The Secret **MUST** be in the same namespace as the MetricsInstance. diff --git a/docs/sources/operator/api.md b/docs/sources/operator/api.md deleted file mode 100644 index 04df805f73..0000000000 --- a/docs/sources/operator/api.md +++ /dev/null @@ -1,566 +0,0 @@ ---- -aliases: -- /docs/agent/latest/operator/crd/ -- /docs/grafana-cloud/agent/operator/api/ -- /docs/grafana-cloud/monitor-infrastructure/agent/operator/api/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/api/ -- /docs/grafana-cloud/send-data/agent/operator/api/ -canonical: https://grafana.com/docs/agent/latest/operator/api/ -title: Custom Resource Definition Reference -description: Learn about the Grafana Agent API -weight: 500 ---- -# Custom Resource Definition Reference -## Resource Types: -* [Deployment](#monitoring.grafana.com/v1alpha1.Deployment) -* [GrafanaAgent](#monitoring.grafana.com/v1alpha1.GrafanaAgent) -* [IntegrationsDeployment](#monitoring.grafana.com/v1alpha1.IntegrationsDeployment) -* [LogsDeployment](#monitoring.grafana.com/v1alpha1.LogsDeployment) -* [MetricsDeployment](#monitoring.grafana.com/v1alpha1.MetricsDeployment) -### Deployment -Deployment is a set of discovered resources relative to a GrafanaAgent. 
The tree of resources contained in a Deployment form the resource hierarchy used for reconciling a GrafanaAgent. -#### Fields -|Field|Description| -|-|-| -|apiVersion|string
`monitoring.grafana.com/v1alpha1`| -|kind|string
`Deployment`| -|`Agent`
_[GrafanaAgent](#monitoring.grafana.com/v1alpha1.GrafanaAgent)_| Root resource in the deployment. | -|`Metrics`
_[[]MetricsDeployment](#monitoring.grafana.com/v1alpha1.MetricsDeployment)_| Metrics resources discovered by Agent. | -|`Logs`
_[[]LogsDeployment](#monitoring.grafana.com/v1alpha1.LogsDeployment)_| Logs resources discovered by Agent. | -|`Integrations`
_[[]IntegrationsDeployment](#monitoring.grafana.com/v1alpha1.IntegrationsDeployment)_| Integrations resources discovered by Agent. | -|`Secrets`
_[github.com/grafana/agent/internal/static/operator/assets.SecretStore](https://pkg.go.dev/github.com/grafana/agent/internal/static/operator/assets#SecretStore)_| The full list of Secrets referenced by resources in the Deployment. | -### GrafanaAgent -(Appears on:[Deployment](#monitoring.grafana.com/v1alpha1.Deployment)) -GrafanaAgent defines a Grafana Agent deployment. -#### Fields -|Field|Description| -|-|-| -|apiVersion|string
`monitoring.grafana.com/v1alpha1`| -|kind|string
`GrafanaAgent`| -|`metadata`
_[Kubernetes meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_| Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -|`spec`
_[GrafanaAgentSpec](#monitoring.grafana.com/v1alpha1.GrafanaAgentSpec)_| Spec holds the specification of the desired behavior for the Grafana Agent cluster. | -|`logLevel`
_string_| LogLevel controls the log level of the generated pods. Defaults to "info" if not set. | -|`logFormat`
_string_| LogFormat controls the logging format of the generated pods. Defaults to "logfmt" if not set. | -|`apiServer`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.APIServerConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.APIServerConfig)_| APIServerConfig lets you specify a host and auth methods to access the Kubernetes API server. If left empty, the Agent assumes that it is running inside of the cluster and will discover API servers automatically and use the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount. | -|`podMetadata`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.EmbeddedObjectMetadata](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.EmbeddedObjectMetadata)_| PodMetadata configures Labels and Annotations which are propagated to created Grafana Agent pods. | -|`version`
_string_| Version of Grafana Agent to be deployed. | -|`paused`
_bool_| Paused prevents actions except for deletion to be performed on the underlying managed objects. | -|`image`
_string_| Image, when specified, overrides the image used to run Agent. Specify the image along with a tag. You still need to set the version to ensure Grafana Agent Operator knows which version of Grafana Agent is being configured. | -|`configReloaderVersion`
_string_| Version of Config Reloader to be deployed. | -|`configReloaderImage`
_string_| Image, when specified, overrides the image used to run Config Reloader. Specify the image along with a tag. You still need to set the version to ensure Grafana Agent Operator knows which version of Grafana Agent is being configured. | -|`imagePullSecrets`
_[[]Kubernetes core/v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#localobjectreference-v1-core)_| ImagePullSecrets holds an optional list of references to Secrets within the same namespace used for pulling the Grafana Agent image from registries. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | -|`storage`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.StorageSpec](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.StorageSpec)_| Storage spec to specify how storage will be used. | -|`volumes`
_[[]Kubernetes core/v1.Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core)_| Volumes allows configuration of additional volumes on the output StatefulSet definition. The volumes specified are appended to other volumes that are generated as a result of StorageSpec objects. | -|`volumeMounts`
_[[]Kubernetes core/v1.VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core)_| VolumeMounts lets you configure additional VolumeMounts on the output StatefulSet definition. Specified VolumeMounts are appended to other VolumeMounts generated as a result of StorageSpec objects in the Grafana Agent container. | -|`resources`
_[Kubernetes core/v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#resourcerequirements-v1-core)_| Resources holds requests and limits for individual pods. | -|`nodeSelector`
_map[string]string_| NodeSelector defines which nodes pods should be scheduling on. | -|`serviceAccountName`
_string_| ServiceAccountName is the name of the ServiceAccount to use for running Grafana Agent pods. | -|`secrets`
_[]string_| Secrets is a list of secrets in the same namespace as the GrafanaAgent object which will be mounted into each running Grafana Agent pod. The secrets are mounted into /var/lib/grafana-agent/extra-secrets/<secret-name>. | -|`configMaps`
_[]string_| ConfigMaps is a list of config maps in the same namespace as the GrafanaAgent object which will be mounted into each running Grafana Agent pod. The ConfigMaps are mounted into /var/lib/grafana-agent/extra-configmaps/<configmap-name>. | -|`affinity`
_[Kubernetes core/v1.Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#affinity-v1-core)_| Affinity, if specified, controls pod scheduling constraints. | -|`tolerations`
_[[]Kubernetes core/v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#toleration-v1-core)_| Tolerations, if specified, controls the pod's tolerations. | -|`topologySpreadConstraints`
_[[]Kubernetes core/v1.TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core)_| TopologySpreadConstraints, if specified, controls the pod's topology spread constraints. | -|`securityContext`
_[Kubernetes core/v1.PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#podsecuritycontext-v1-core)_| SecurityContext holds pod-level security attributes and common container settings. When unspecified, defaults to the default PodSecurityContext. | -|`containers`
_[[]Kubernetes core/v1.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core)_| Containers lets you inject additional containers or modify operator-generated containers. This can be used to add an authentication proxy to a Grafana Agent pod or to change the behavior of an operator-generated container. Containers described here modify an operator-generated container if they share the same name and if modifications are done via a strategic merge patch. The current container names are: `grafana-agent` and `config-reloader`. Overriding containers is entirely outside the scope of what the Grafana Agent team supports and by doing so, you accept that this behavior may break at any time without notice. | -|`initContainers`
_[[]Kubernetes core/v1.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core)_| InitContainers let you add initContainers to the pod definition. These can be used to, for example, fetch secrets for injection into the Grafana Agent configuration from external sources. Errors during the execution of an initContainer cause the pod to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ Using initContainers for any use case other than secret fetching is entirely outside the scope of what the Grafana Agent maintainers support and by doing so, you accept that this behavior may break at any time without notice. | -|`priorityClassName`
_string_| PriorityClassName is the priority class assigned to pods. | -|`runtimeClassName`
_string_| RuntimeClassName is the runtime class assigned to pods. | -|`portName`
_string_| Port name used for the pods and governing service. This defaults to agent-metrics. | -|`metrics`
_[MetricsSubsystemSpec](#monitoring.grafana.com/v1alpha1.MetricsSubsystemSpec)_| Metrics controls the metrics subsystem of the Agent and settings unique to metrics-specific pods that are deployed. | -|`logs`
_[LogsSubsystemSpec](#monitoring.grafana.com/v1alpha1.LogsSubsystemSpec)_| Logs controls the logging subsystem of the Agent and settings unique to logging-specific pods that are deployed. | -|`integrations`
_[IntegrationsSubsystemSpec](#monitoring.grafana.com/v1alpha1.IntegrationsSubsystemSpec)_| Integrations controls the integration subsystem of the Agent and settings unique to deployed integration-specific pods. | -|`enableConfigReadAPI`
_bool_| enableConfigReadAPI enables the read API for viewing the currently running config port 8080 on the agent. +kubebuilder:default=false | -|`disableReporting`
_bool_| disableReporting disables reporting of enabled feature flags to Grafana. +kubebuilder:default=false | -|`disableSupportBundle`
_bool_| disableSupportBundle disables the generation of support bundles. +kubebuilder:default=false | -### IntegrationsDeployment -(Appears on:[Deployment](#monitoring.grafana.com/v1alpha1.Deployment)) -IntegrationsDeployment is a set of discovered resources relative to an IntegrationsDeployment. -#### Fields -|Field|Description| -|-|-| -|apiVersion|string
`monitoring.grafana.com/v1alpha1`| -|kind|string
`IntegrationsDeployment`| -|`Instance`
_[Integration](#monitoring.grafana.com/v1alpha1.Integration)_| | -### LogsDeployment -(Appears on:[Deployment](#monitoring.grafana.com/v1alpha1.Deployment)) -LogsDeployment is a set of discovered resources relative to a LogsInstance. -#### Fields -|Field|Description| -|-|-| -|apiVersion|string
`monitoring.grafana.com/v1alpha1`| -|kind|string
`LogsDeployment`| -|`Instance`
_[LogsInstance](#monitoring.grafana.com/v1alpha1.LogsInstance)_| | -|`PodLogs`
_[[]PodLogs](#monitoring.grafana.com/v1alpha1.PodLogs)_| | -### MetricsDeployment -(Appears on:[Deployment](#monitoring.grafana.com/v1alpha1.Deployment)) -MetricsDeployment is a set of discovered resources relative to a MetricsInstance. -#### Fields -|Field|Description| -|-|-| -|apiVersion|string
`monitoring.grafana.com/v1alpha1`| -|kind|string
`MetricsDeployment`| -|`Instance`
_[MetricsInstance](#monitoring.grafana.com/v1alpha1.MetricsInstance)_| | -|`ServiceMonitors`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.ServiceMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ServiceMonitor)_| | -|`PodMonitors`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.PodMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor)_| | -|`Probes`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.Probe](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.Probe)_| | -### CRIStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -CRIStageSpec is a parsing stage that reads log lines using the standard CRI logging format. It needs no defined fields. -### DockerStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -DockerStageSpec is a parsing stage that reads log lines using the standard Docker logging format. It needs no defined fields. -### DropStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -DropStageSpec is a filtering stage that lets you drop certain logs. -#### Fields -|Field|Description| -|-|-| -|`source`
_string_| Name from the extract data to parse. If empty, uses the log message. | -|`expression`
_string_| RE2 regular expression. If source is provided, the regex attempts to match the source. If no source is provided, then the regex attempts to attach the log line. If the provided regex matches the log line or a provided source, the line is dropped. | -|`value`
_string_| Value can only be specified when source is specified. If the value provided is an exact match for the given source then the line will be dropped. Mutually exclusive with expression. | -|`olderThan`
_string_| OlderThan will be parsed as a Go duration. If the log line's timestamp is older than the current time minus the provided duration, it will be dropped. | -|`longerThan`
_string_| LongerThan will drop a log line if it its content is longer than this value (in bytes). Can be expressed as an integer (8192) or a number with a suffix (8kb). | -|`dropCounterReason`
_string_| Every time a log line is dropped, the metric logentry_dropped_lines_total is incremented. A "reason" label is added, and can be customized by providing a custom value here. Defaults to "drop_stage". | -### GrafanaAgentSpec -(Appears on:[GrafanaAgent](#monitoring.grafana.com/v1alpha1.GrafanaAgent)) -GrafanaAgentSpec is a specification of the desired behavior of the Grafana Agent cluster. -#### Fields -|Field|Description| -|-|-| -|`logLevel`
_string_| LogLevel controls the log level of the generated pods. Defaults to "info" if not set. | -|`logFormat`
_string_| LogFormat controls the logging format of the generated pods. Defaults to "logfmt" if not set. | -|`apiServer`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.APIServerConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.APIServerConfig)_| APIServerConfig lets you specify a host and auth methods to access the Kubernetes API server. If left empty, the Agent assumes that it is running inside of the cluster and will discover API servers automatically and use the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount. | -|`podMetadata`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.EmbeddedObjectMetadata](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.EmbeddedObjectMetadata)_| PodMetadata configures Labels and Annotations which are propagated to created Grafana Agent pods. | -|`version`
_string_| Version of Grafana Agent to be deployed. | -|`paused`
_bool_| Paused prevents actions except for deletion to be performed on the underlying managed objects. | -|`image`
_string_| Image, when specified, overrides the image used to run Agent. Specify the image along with a tag. You still need to set the version to ensure Grafana Agent Operator knows which version of Grafana Agent is being configured. | -|`configReloaderVersion`
_string_| Version of Config Reloader to be deployed. | -|`configReloaderImage`
_string_| Image, when specified, overrides the image used to run Config Reloader. Specify the image along with a tag. You still need to set the version to ensure Grafana Agent Operator knows which version of Grafana Agent is being configured. | -|`imagePullSecrets`
_[[]Kubernetes core/v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#localobjectreference-v1-core)_| ImagePullSecrets holds an optional list of references to Secrets within the same namespace used for pulling the Grafana Agent image from registries. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | -|`storage`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.StorageSpec](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.StorageSpec)_| Storage spec to specify how storage will be used. | -|`volumes`
_[[]Kubernetes core/v1.Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core)_| Volumes allows configuration of additional volumes on the output StatefulSet definition. The volumes specified are appended to other volumes that are generated as a result of StorageSpec objects. | -|`volumeMounts`
_[[]Kubernetes core/v1.VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core)_| VolumeMounts lets you configure additional VolumeMounts on the output StatefulSet definition. Specified VolumeMounts are appended to other VolumeMounts generated as a result of StorageSpec objects in the Grafana Agent container. | -|`resources`
_[Kubernetes core/v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#resourcerequirements-v1-core)_| Resources holds requests and limits for individual pods. | -|`nodeSelector`
_map[string]string_| NodeSelector defines which nodes pods should be scheduling on. | -|`serviceAccountName`
_string_| ServiceAccountName is the name of the ServiceAccount to use for running Grafana Agent pods. | -|`secrets`
_[]string_| Secrets is a list of secrets in the same namespace as the GrafanaAgent object which will be mounted into each running Grafana Agent pod. The secrets are mounted into /var/lib/grafana-agent/extra-secrets/<secret-name>. | -|`configMaps`
_[]string_| ConfigMaps is a list of config maps in the same namespace as the GrafanaAgent object which will be mounted into each running Grafana Agent pod. The ConfigMaps are mounted into /var/lib/grafana-agent/extra-configmaps/<configmap-name>. | -|`affinity`
_[Kubernetes core/v1.Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#affinity-v1-core)_| Affinity, if specified, controls pod scheduling constraints. | -|`tolerations`
_[[]Kubernetes core/v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#toleration-v1-core)_| Tolerations, if specified, controls the pod's tolerations. | -|`topologySpreadConstraints`
_[[]Kubernetes core/v1.TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core)_| TopologySpreadConstraints, if specified, controls the pod's topology spread constraints. | -|`securityContext`
_[Kubernetes core/v1.PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#podsecuritycontext-v1-core)_| SecurityContext holds pod-level security attributes and common container settings. When unspecified, defaults to the default PodSecurityContext. | -|`containers`
_[[]Kubernetes core/v1.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core)_| Containers lets you inject additional containers or modify operator-generated containers. This can be used to add an authentication proxy to a Grafana Agent pod or to change the behavior of an operator-generated container. Containers described here modify an operator-generated container if they share the same name and if modifications are done via a strategic merge patch. The current container names are: `grafana-agent` and `config-reloader`. Overriding containers is entirely outside the scope of what the Grafana Agent team supports and by doing so, you accept that this behavior may break at any time without notice. | -|`initContainers`
_[[]Kubernetes core/v1.Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core)_| InitContainers let you add initContainers to the pod definition. These can be used to, for example, fetch secrets for injection into the Grafana Agent configuration from external sources. Errors during the execution of an initContainer cause the pod to restart. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ Using initContainers for any use case other than secret fetching is entirely outside the scope of what the Grafana Agent maintainers support and by doing so, you accept that this behavior may break at any time without notice. | -|`priorityClassName`
_string_| PriorityClassName is the priority class assigned to pods. | -|`runtimeClassName`
_string_| RuntimeClassName is the runtime class assigned to pods. | -|`portName`
_string_| Port name used for the pods and governing service. This defaults to agent-metrics. | -|`metrics`
_[MetricsSubsystemSpec](#monitoring.grafana.com/v1alpha1.MetricsSubsystemSpec)_| Metrics controls the metrics subsystem of the Agent and settings unique to metrics-specific pods that are deployed. | -|`logs`
_[LogsSubsystemSpec](#monitoring.grafana.com/v1alpha1.LogsSubsystemSpec)_| Logs controls the logging subsystem of the Agent and settings unique to logging-specific pods that are deployed. | -|`integrations`
_[IntegrationsSubsystemSpec](#monitoring.grafana.com/v1alpha1.IntegrationsSubsystemSpec)_| Integrations controls the integration subsystem of the Agent and settings unique to deployed integration-specific pods. | -|`enableConfigReadAPI`
_bool_| enableConfigReadAPI enables the read API for viewing the currently running config port 8080 on the agent. +kubebuilder:default=false | -|`disableReporting`
_bool_| disableReporting disables reporting of enabled feature flags to Grafana. +kubebuilder:default=false | -|`disableSupportBundle`
_bool_| disableSupportBundle disables the generation of support bundles. +kubebuilder:default=false | -### Integration -(Appears on:[IntegrationsDeployment](#monitoring.grafana.com/v1alpha1.IntegrationsDeployment)) -Integration runs a single Grafana Agent integration. Integrations that generate telemetry must be configured to send that telemetry somewhere, such as autoscrape for exporter-based integrations. Integrations have access to the LogsInstances and MetricsInstances in the same GrafanaAgent resource set, referenced by the <namespace>/<name> of the Instance resource. For example, if there is a default/production MetricsInstance, you can configure a supported integration's autoscrape block with: autoscrape: enable: true metrics_instance: default/production There is currently no way for telemetry created by an Operator-managed integration to be collected from outside of the integration itself. -#### Fields -|Field|Description| -|-|-| -|`metadata`
_[Kubernetes meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_| Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -|`spec`
_[IntegrationSpec](#monitoring.grafana.com/v1alpha1.IntegrationSpec)_| Specifies the desired behavior of the Integration. | -|`name`
_string_| Name of the integration to run (e.g., "node_exporter", "mysqld_exporter"). | -|`type`
_[IntegrationType](#monitoring.grafana.com/v1alpha1.IntegrationType)_| Type informs Grafana Agent Operator about how to manage the integration being configured. | -|`config`
_[k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON](https://pkg.go.dev/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1#JSON)_| The configuration for the named integration. Note that Integrations are deployed with the integrations-next feature flag, which has different common settings: https://grafana.com/docs/agent/latest/configuration/integrations/integrations-next/ | -|`volumes`
_[[]Kubernetes core/v1.Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core)_| An extra list of Volumes to be associated with the Grafana Agent pods running this integration. Volume names are mutated to be unique across all Integrations. Note that the specified volumes should be able to tolerate existing on multiple pods at once when type is daemonset. Don't use volumes for loading Secrets or ConfigMaps from the same namespace as the Integration; use the Secrets and ConfigMaps fields instead. | -|`volumeMounts`
_[[]Kubernetes core/v1.VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core)_| An extra list of VolumeMounts to be associated with the Grafana Agent pods running this integration. VolumeMount names are mutated to be unique across all used IntegrationSpecs. Mount paths should include the namespace/name of the Integration CR to avoid potentially colliding with other resources. | -|`secrets`
_[[]Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| An extra list of keys from Secrets in the same namespace as the Integration which will be mounted into the Grafana Agent pod running this Integration. Secrets will be mounted at /etc/grafana-agent/integrations/secrets/<secret_namespace>/<secret_name>/<key>. | -|`configMaps`
_[[]Kubernetes core/v1.ConfigMapKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#configmapkeyselector-v1-core)_| An extra list of keys from ConfigMaps in the same namespace as the Integration which will be mounted into the Grafana Agent pod running this Integration. ConfigMaps are mounted at /etc/grafana-agent/integrations/configMaps/<configmap_namespace>/<configmap_name>/<key>. | -### IntegrationSpec -(Appears on:[Integration](#monitoring.grafana.com/v1alpha1.Integration)) -IntegrationSpec specifies the desired behavior of a metrics integration. -#### Fields -|Field|Description| -|-|-| -|`name`
_string_| Name of the integration to run (e.g., "node_exporter", "mysqld_exporter"). | -|`type`
_[IntegrationType](#monitoring.grafana.com/v1alpha1.IntegrationType)_| Type informs Grafana Agent Operator about how to manage the integration being configured. | -|`config`
_[k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON](https://pkg.go.dev/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1#JSON)_| The configuration for the named integration. Note that Integrations are deployed with the integrations-next feature flag, which has different common settings: https://grafana.com/docs/agent/latest/configuration/integrations/integrations-next/ | -|`volumes`
_[[]Kubernetes core/v1.Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core)_| An extra list of Volumes to be associated with the Grafana Agent pods running this integration. Volume names are mutated to be unique across all Integrations. Note that the specified volumes should be able to tolerate existing on multiple pods at once when type is daemonset. Don't use volumes for loading Secrets or ConfigMaps from the same namespace as the Integration; use the Secrets and ConfigMaps fields instead. | -|`volumeMounts`
_[[]Kubernetes core/v1.VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core)_| An extra list of VolumeMounts to be associated with the Grafana Agent pods running this integration. VolumeMount names are mutated to be unique across all used IntegrationSpecs. Mount paths should include the namespace/name of the Integration CR to avoid potentially colliding with other resources. | -|`secrets`
_[[]Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| An extra list of keys from Secrets in the same namespace as the Integration which will be mounted into the Grafana Agent pod running this Integration. Secrets will be mounted at /etc/grafana-agent/integrations/secrets/<secret_namespace>/<secret_name>/<key>. | -|`configMaps`
_[[]Kubernetes core/v1.ConfigMapKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#configmapkeyselector-v1-core)_| An extra list of keys from ConfigMaps in the same namespace as the Integration which will be mounted into the Grafana Agent pod running this Integration. ConfigMaps are mounted at /etc/grafana-agent/integrations/configMaps/<configmap_namespace>/<configmap_name>/<key>. | -### IntegrationType -(Appears on:[IntegrationSpec](#monitoring.grafana.com/v1alpha1.IntegrationSpec)) -IntegrationType determines specific behaviors of a configured integration. -#### Fields -|Field|Description| -|-|-| -|`allNodes`
_bool_| When true, the configured integration should be run on every Node in the cluster. This is required for Integrations that generate Node-specific metrics like node_exporter, otherwise it must be false to avoid generating duplicate metrics. | -|`unique`
_bool_| Whether this integration can only be defined once for a Grafana Agent process, such as statsd_exporter. It is invalid for a GrafanaAgent to discover multiple unique Integrations with the same Integration name (i.e., a single GrafanaAgent cannot deploy two statsd_exporters). | -### IntegrationsSubsystemSpec -(Appears on:[GrafanaAgentSpec](#monitoring.grafana.com/v1alpha1.GrafanaAgentSpec)) -IntegrationsSubsystemSpec defines global settings to apply across the integrations subsystem. -#### Fields -|Field|Description| -|-|-| -|`selector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Label selector to find Integration resources to run. When nil, no integration resources will be defined. | -|`namespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Label selector for namespaces to search when discovering integration resources. If nil, integration resources are only discovered in the namespace of the GrafanaAgent resource. Set to `{}` to search all namespaces. | -### JSONStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -JSONStageSpec is a parsing stage that reads the log line as JSON and accepts JMESPath expressions to extract data. -#### Fields -|Field|Description| -|-|-| -|`source`
_string_| Name from the extracted data to parse as JSON. If empty, uses entire log message. | -|`expressions`
_map[string]string_| Set of the key/value pairs of JMESPath expressions. The key will be the key in the extracted data while the expression will be the value, evaluated as a JMESPath from the source data. Literal JMESPath expressions can be used by wrapping a key in double quotes, which then must be wrapped again in single quotes in YAML so they get passed to the JMESPath parser. | -### LimitStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -The limit stage is a rate-limiting stage that throttles logs based on several options. -#### Fields -|Field|Description| -|-|-| -|`rate`
_int_| The rate limit in lines per second that Promtail will push to Loki. | -|`burst`
_int_| The cap in the quantity of burst lines that Promtail will push to Loki. | -|`drop`
_bool_| When drop is true, log lines that exceed the current rate limit are discarded. When drop is false, log lines that exceed the current rate limit are held back, applying back pressure. Defaults to false. | -### LogsBackoffConfigSpec -(Appears on:[LogsClientSpec](#monitoring.grafana.com/v1alpha1.LogsClientSpec)) -LogsBackoffConfigSpec configures timing for retrying failed requests. -#### Fields -|Field|Description| -|-|-|
_string_| Initial backoff time between retries. Time between retries is increased exponentially. | -|`maxPeriod`
_string_| Maximum backoff time between retries. | -|`maxRetries`
_int_| Maximum number of retries to perform before giving up a request. | -### LogsClientSpec -(Appears on:[LogsInstanceSpec](#monitoring.grafana.com/v1alpha1.LogsInstanceSpec), [LogsSubsystemSpec](#monitoring.grafana.com/v1alpha1.LogsSubsystemSpec)) -LogsClientSpec defines the client integration for logs, indicating which Loki server to send logs to. -#### Fields -|Field|Description| -|-|-| -|`url`
_string_| URL is the URL where Loki is listening. Must be a full HTTP URL, including protocol. Required. Example: https://logs-prod-us-central1.grafana.net/loki/api/v1/push. | -|`tenantId`
_string_| Tenant ID used by default to push logs to Loki. If omitted, it is assumed that the remote Loki instance is running in single-tenant mode or that an authentication layer is used to inject an X-Scope-OrgID header. | -|`batchWait`
_string_| Maximum amount of time to wait before sending a batch, even if that batch isn't full. | -|`batchSize`
_int_| Maximum batch size (in bytes) of logs to accumulate before sending the batch to Loki. | -|`basicAuth`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.BasicAuth)_| BasicAuth for the Loki server. | -|`oauth2`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.OAuth2](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.OAuth2)_| OAuth2 for the URL. | -|`bearerToken`
_string_| BearerToken used for remote_write. | -|`bearerTokenFile`
_string_| BearerTokenFile used to read bearer token. | -|`proxyUrl`
_string_| ProxyURL to proxy requests through. Optional. | -|`tlsConfig`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.TLSConfig)_| TLSConfig to use for the client. Only used when the protocol of the URL is https. | -|`backoffConfig`
_[LogsBackoffConfigSpec](#monitoring.grafana.com/v1alpha1.LogsBackoffConfigSpec)_| Configures how to retry requests to Loki when a request fails. Defaults to a minPeriod of 500ms, maxPeriod of 5m, and maxRetries of 10. | -|`externalLabels`
_map[string]string_| ExternalLabels are labels to add to any time series when sending data to Loki. | -|`timeout`
_string_| Maximum time to wait for a server to respond to a request. | -### LogsInstance -(Appears on:[LogsDeployment](#monitoring.grafana.com/v1alpha1.LogsDeployment)) -LogsInstance controls an individual logs instance within a Grafana Agent deployment. -#### Fields -|Field|Description| -|-|-| -|`metadata`
_[Kubernetes meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_| Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -|`spec`
_[LogsInstanceSpec](#monitoring.grafana.com/v1alpha1.LogsInstanceSpec)_| Spec holds the specification of the desired behavior for the logs instance. | -|`clients`
_[[]LogsClientSpec](#monitoring.grafana.com/v1alpha1.LogsClientSpec)_| Clients controls where logs are written to for this instance. | -|`podLogsSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Determines which PodLogs should be selected for including in this instance. | -|`podLogsNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Set of labels to determine which namespaces should be watched for PodLogs. If not provided, checks only namespace of the instance. | -|`additionalScrapeConfigs`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| AdditionalScrapeConfigs allows specifying a key of a Secret containing additional Grafana Agent logging scrape configurations. Scrape configurations specified are appended to the configurations generated by the Grafana Agent Operator. Job configurations specified must have the form as specified in the official Promtail documentation: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs As scrape configs are appended, the user is responsible for making sure they are valid. Note that using this feature may break upgrades of Grafana Agent. It is advised to review both Grafana Agent and Promtail release notes to ensure that no incompatible scrape configs are going to break Grafana Agent after the upgrade. | -|`targetConfig`
_[LogsTargetConfigSpec](#monitoring.grafana.com/v1alpha1.LogsTargetConfigSpec)_| Configures how tailed targets are watched. | -### LogsInstanceSpec -(Appears on:[LogsInstance](#monitoring.grafana.com/v1alpha1.LogsInstance)) -LogsInstanceSpec controls how an individual instance will be used to discover LogMonitors. -#### Fields -|Field|Description| -|-|-| -|`clients`
_[[]LogsClientSpec](#monitoring.grafana.com/v1alpha1.LogsClientSpec)_| Clients controls where logs are written to for this instance. | -|`podLogsSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Determines which PodLogs should be selected for including in this instance. | -|`podLogsNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Set of labels to determine which namespaces should be watched for PodLogs. If not provided, checks only namespace of the instance. | -|`additionalScrapeConfigs`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| AdditionalScrapeConfigs allows specifying a key of a Secret containing additional Grafana Agent logging scrape configurations. Scrape configurations specified are appended to the configurations generated by the Grafana Agent Operator. Job configurations specified must have the form as specified in the official Promtail documentation: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs As scrape configs are appended, the user is responsible for making sure they are valid. Note that using this feature may break upgrades of Grafana Agent. It is advised to review both Grafana Agent and Promtail release notes to ensure that no incompatible scrape configs are going to break Grafana Agent after the upgrade. | -|`targetConfig`
_[LogsTargetConfigSpec](#monitoring.grafana.com/v1alpha1.LogsTargetConfigSpec)_| Configures how tailed targets are watched. | -### LogsSubsystemSpec -(Appears on:[GrafanaAgentSpec](#monitoring.grafana.com/v1alpha1.GrafanaAgentSpec)) -LogsSubsystemSpec defines global settings to apply across the logging subsystem. -#### Fields -|Field|Description| -|-|-| -|`clients`
_[[]LogsClientSpec](#monitoring.grafana.com/v1alpha1.LogsClientSpec)_| A global set of clients to use when a discovered LogsInstance does not have any clients defined. | -|`logsExternalLabelName`
_string_| LogsExternalLabelName is the name of the external label used to denote the Grafana Agent cluster. Defaults to "cluster". The external label is _not_ added when the value is set to the empty string. | -|`instanceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| InstanceSelector determines which LogInstances should be selected for running. Each instance runs its own set of Prometheus components, including service discovery, scraping, and remote_write. | -|`instanceNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| InstanceNamespaceSelector are the set of labels to determine which namespaces to watch for LogInstances. If not provided, only checks own namespace. | -|`ignoreNamespaceSelectors`
_bool_| IgnoreNamespaceSelectors, if true, will ignore NamespaceSelector settings from the PodLogs configs, and they will only discover endpoints within their current namespace. | -|`enforcedNamespaceLabel`
_string_| EnforcedNamespaceLabel enforces adding a namespace label of origin for each metric that is user-created. The label value will always be the namespace of the object that is being created. | -### LogsTargetConfigSpec -(Appears on:[LogsInstanceSpec](#monitoring.grafana.com/v1alpha1.LogsInstanceSpec)) -LogsTargetConfigSpec configures how tailed targets are watched. -#### Fields -|Field|Description| -|-|-| -|`syncPeriod`
_string_| Period to resync directories being watched and files being tailed to discover new ones or stop watching removed ones. | -### MatchStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -MatchStageSpec is a filtering stage that conditionally applies a set of stages or drop entries when a log entry matches a configurable LogQL stream selector and filter expressions. -#### Fields -|Field|Description| -|-|-| -|`selector`
_string_| LogQL stream selector and filter expressions. Required. | -|`pipelineName`
_string_| Names the pipeline. When defined, creates an additional label in the pipeline_duration_seconds histogram, where the value is concatenated with job_name using an underscore. | -|`action`
_string_| Determines what action is taken when the selector matches the log line. Can be keep or drop. Defaults to keep. When set to drop, entries are dropped and no later metrics are recorded. Stages must be empty when dropping metrics. | -|`dropCounterReason`
_string_| Every time a log line is dropped, the metric logentry_dropped_lines_total is incremented. A "reason" label is added, and can be customized by providing a custom value here. Defaults to "match_stage." | -|`stages`
_string_| Nested set of pipeline stages to execute when action is keep and the log line matches selector. An example value for stages may be: stages: | - json: {} - labelAllow: [foo, bar] Note that stages is a string because SIG API Machinery does not support recursive types, and so it cannot be validated for correctness. Be careful not to mistype anything. | -### MetadataConfig -(Appears on:[RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)) -MetadataConfig configures the sending of series metadata to remote storage. -#### Fields -|Field|Description| -|-|-| -|`send`
_bool_| Send enables metric metadata to be sent to remote storage. | -|`sendInterval`
_string_| SendInterval controls how frequently metric metadata is sent to remote storage. | -### MetricsInstance -(Appears on:[MetricsDeployment](#monitoring.grafana.com/v1alpha1.MetricsDeployment)) -MetricsInstance controls an individual Metrics instance within a Grafana Agent deployment. -#### Fields -|Field|Description| -|-|-| -|`metadata`
_[Kubernetes meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_| Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -|`spec`
_[MetricsInstanceSpec](#monitoring.grafana.com/v1alpha1.MetricsInstanceSpec)_| Spec holds the specification of the desired behavior for the Metrics instance. | -|`walTruncateFrequency`
_string_| WALTruncateFrequency specifies how frequently to run the WAL truncation process. Higher values cause the WAL to grow in size and old series to stay in the WAL longer, but reduce the chance of data loss when remote_write fails for longer than the given frequency. | -|`minWALTime`
_string_| MinWALTime is the minimum amount of time that series and samples can exist in the WAL before being considered for deletion. | -|`maxWALTime`
_string_| MaxWALTime is the maximum amount of time that series and samples can exist in the WAL before being forcibly deleted. | -|`remoteFlushDeadline`
_string_| RemoteFlushDeadline is the deadline for flushing data when an instance shuts down. | -|`writeStaleOnShutdown`
_bool_| WriteStaleOnShutdown writes staleness markers on shutdown for all series. | -|`serviceMonitorSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ServiceMonitorSelector determines which ServiceMonitors to select for target discovery. | -|`serviceMonitorNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ServiceMonitorNamespaceSelector is the set of labels that determine which namespaces to watch for ServiceMonitor discovery. If nil, it only checks its own namespace. | -|`podMonitorSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| PodMonitorSelector determines which PodMonitors to select for target discovery. Experimental. | -|`podMonitorNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| PodMonitorNamespaceSelector are the set of labels to determine which namespaces to watch for PodMonitor discovery. If nil, it only checks its own namespace. | -|`probeSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ProbeSelector determines which Probes to select for target discovery. | -|`probeNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ProbeNamespaceSelector is the set of labels that determines which namespaces to watch for Probe discovery. If nil, it only checks own namespace. | -|`remoteWrite`
_[[]RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)_| RemoteWrite controls remote_write settings for this instance. | -|`additionalScrapeConfigs`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| AdditionalScrapeConfigs lets you specify a key of a Secret containing additional Grafana Agent Prometheus scrape configurations. The specified scrape configurations are appended to the configurations generated by Grafana Agent Operator. Specified job configurations must have the form specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are appended, you must make sure the configuration is still valid. Note that it's possible that this feature will break future upgrades of Grafana Agent. Review both Grafana Agent and Prometheus release notes to ensure that no incompatible scrape configs will break Grafana Agent after the upgrade. | -### MetricsInstanceSpec -(Appears on:[MetricsInstance](#monitoring.grafana.com/v1alpha1.MetricsInstance)) -MetricsInstanceSpec controls how an individual instance is used to discover PodMonitors. -#### Fields -|Field|Description| -|-|-| -|`walTruncateFrequency`
_string_| WALTruncateFrequency specifies how frequently to run the WAL truncation process. Higher values cause the WAL to grow in size and old series to stay in the WAL longer, but reduce the chance of data loss when remote_write fails for longer than the given frequency. | -|`minWALTime`
_string_| MinWALTime is the minimum amount of time that series and samples can exist in the WAL before being considered for deletion. | -|`maxWALTime`
_string_| MaxWALTime is the maximum amount of time that series and samples can exist in the WAL before being forcibly deleted. | -|`remoteFlushDeadline`
_string_| RemoteFlushDeadline is the deadline for flushing data when an instance shuts down. | -|`writeStaleOnShutdown`
_bool_| WriteStaleOnShutdown writes staleness markers on shutdown for all series. | -|`serviceMonitorSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ServiceMonitorSelector determines which ServiceMonitors to select for target discovery. | -|`serviceMonitorNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ServiceMonitorNamespaceSelector is the set of labels that determine which namespaces to watch for ServiceMonitor discovery. If nil, it only checks its own namespace. | -|`podMonitorSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| PodMonitorSelector determines which PodMonitors to select for target discovery. Experimental. | -|`podMonitorNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| PodMonitorNamespaceSelector are the set of labels to determine which namespaces to watch for PodMonitor discovery. If nil, it only checks its own namespace. | -|`probeSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ProbeSelector determines which Probes to select for target discovery. | -|`probeNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| ProbeNamespaceSelector is the set of labels that determines which namespaces to watch for Probe discovery. If nil, it only checks own namespace. | -|`remoteWrite`
_[[]RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)_| RemoteWrite controls remote_write settings for this instance. | -|`additionalScrapeConfigs`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| AdditionalScrapeConfigs lets you specify a key of a Secret containing additional Grafana Agent Prometheus scrape configurations. The specified scrape configurations are appended to the configurations generated by Grafana Agent Operator. Specified job configurations must have the form specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are appended, you must make sure the configuration is still valid. Note that it's possible that this feature will break future upgrades of Grafana Agent. Review both Grafana Agent and Prometheus release notes to ensure that no incompatible scrape configs will break Grafana Agent after the upgrade. | -### MetricsStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -MetricsStageSpec is an action stage that allows for defining and updating metrics based on data from the extracted map. Created metrics are not pushed to Loki or Prometheus and are instead exposed via the /metrics endpoint of the Grafana Agent pod. The Grafana Agent Operator should be configured with a MetricsInstance that discovers the logging DaemonSet to collect metrics created by this stage. -#### Fields -|Field|Description| -|-|-| -|`type`
_string_| The metric type to create. Must be one of counter, gauge, histogram. Required. | -|`description`
_string_| Sets the description for the created metric. | -|`prefix`
_string_| Sets the custom prefix name for the metric. Defaults to "promtail_custom_". | -|`source`
_string_| Key from the extracted data map to use for the metric. Defaults to the metric name if not present. | -|`maxIdleDuration`
_string_| Label values on metrics are dynamic which can cause exported metrics to go stale. To prevent unbounded cardinality, any metrics not updated within MaxIdleDuration are removed. Must be greater or equal to 1s. Defaults to 5m. | -|`matchAll`
_bool_| If true, all log lines are counted without attempting to match the source to the extracted map. Mutually exclusive with value. Only valid for type: counter. | -|`countEntryBytes`
_bool_| If true all log line bytes are counted. Can only be set with matchAll: true and action: add. Only valid for type: counter. | -|`value`
_string_| Filters down source data and only changes the metric if the targeted value matches the provided string exactly. If not present, all data matches. | -|`action`
_string_| The action to take against the metric. Required. Must be either "inc" or "add" for type: counter or type: histogram. When type: gauge, must be one of "set", "inc", "dec", "add", or "sub". "add", "set", or "sub" requires the extracted value to be convertible to a positive float. | -|`buckets`
_[]string_| Buckets to create. Bucket values must be convertible to float64s. Extremely large or small numbers are subject to some loss of precision. Only valid for type: histogram. | -### MetricsSubsystemSpec -(Appears on:[GrafanaAgentSpec](#monitoring.grafana.com/v1alpha1.GrafanaAgentSpec)) -MetricsSubsystemSpec defines global settings to apply across the Metrics subsystem. -#### Fields -|Field|Description| -|-|-| -|`remoteWrite`
_[[]RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)_| RemoteWrite controls default remote_write settings for all instances. If an instance does not provide its own RemoteWrite settings, these will be used instead. | -|`replicas`
_int32_| Replicas of each shard to deploy for metrics pods. Number of replicas multiplied by the number of shards is the total number of pods created. | -|`shards`
_int32_| Shards to distribute targets onto. Number of replicas multiplied by the number of shards is the total number of pods created. Note that scaling down shards does not reshard data onto remaining instances; it must be manually moved. Increasing shards does not reshard data either, but it will continue to be available from the same instances. Sharding is performed on the content of the __address__ target meta-label. | -|`replicaExternalLabelName`
_string_| ReplicaExternalLabelName is the name of the metrics external label used to denote the replica name. Defaults to __replica__. The external label is _not_ added when the value is set to the empty string. | -|`metricsExternalLabelName`
_string_| MetricsExternalLabelName is the name of the external label used to denote the Grafana Agent cluster. Defaults to "cluster". The external label is _not_ added when the value is set to the empty string. | -|`scrapeInterval`
_string_| ScrapeInterval is the time between consecutive scrapes. | -|`scrapeTimeout`
_string_| ScrapeTimeout is the time to wait for a target to respond before marking a scrape as failed. | -|`externalLabels`
_map[string]string_| ExternalLabels are labels to add to any time series when sending data over remote_write. | -|`arbitraryFSAccessThroughSMs`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.ArbitraryFSAccessThroughSMsConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ArbitraryFSAccessThroughSMsConfig)_| ArbitraryFSAccessThroughSMs configures whether configuration based on a ServiceMonitor can access arbitrary files on the file system of the Grafana Agent container, e.g., bearer token files. | -|`overrideHonorLabels`
_bool_| OverrideHonorLabels, if true, overrides all configured honor_labels read from ServiceMonitor or PodMonitor and sets them to false. | -|`overrideHonorTimestamps`
_bool_| OverrideHonorTimestamps allows global enforcement for honoring timestamps in all scrape configs. | -|`ignoreNamespaceSelectors`
_bool_| IgnoreNamespaceSelectors, if true, ignores NamespaceSelector settings from the PodMonitor and ServiceMonitor configs, so that they only discover endpoints within their current namespace. | -|`enforcedNamespaceLabel`
_string_| EnforcedNamespaceLabel enforces adding a namespace label of origin for each metric that is user-created. The label value is always the namespace of the object that is being created. | -|`enforcedSampleLimit`
_uint64_| EnforcedSampleLimit defines a global limit on the number of scraped samples that are accepted. This overrides any SampleLimit set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall number of samples and series under the desired limit. Note that if a SampleLimit from a ServiceMonitor or PodMonitor is lower, that value is used instead. | -|`enforcedTargetLimit`
_uint64_| EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall number of targets under the desired limit. Note that if a TargetLimit from a ServiceMonitor or PodMonitor is higher, that value is used instead. | -|`instanceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| InstanceSelector determines which MetricsInstances should be selected for running. Each instance runs its own set of Metrics components, including service discovery, scraping, and remote_write. | -|`instanceNamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| InstanceNamespaceSelector is the set of labels that determines which namespaces to watch for MetricsInstances. If not provided, it only checks its own namespace. | -### MultilineStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -MultilineStageSpec merges multiple lines into a multiline block before passing it on to the next stage in the pipeline. -#### Fields -|Field|Description| -|-|-| -|`firstLine`
_string_| RE2 regular expression. Creates a new multiline block when matched. Required. | -|`maxWaitTime`
_string_| Maximum time to wait before passing on the multiline block to the next stage if no new lines are received. Defaults to 3s. | -|`maxLines`
_int_| Maximum number of lines a block can have. A new block is started if the number of lines surpasses this value. Defaults to 128. | -### ObjectSelector -ObjectSelector is a set of selectors to use for finding an object in the resource hierarchy. When NamespaceSelector is nil, search for objects directly in the ParentNamespace. -#### Fields -|Field|Description| -|-|-| -|`ObjectType`
_[sigs.k8s.io/controller-runtime/pkg/client.Object](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client#Object)_| | -|`ParentNamespace`
_string_| | -|`NamespaceSelector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| | -|`Labels`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| | -### OutputStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -OutputStageSpec is an action stage that takes data from the extracted map and changes the log line that will be sent to Loki. -#### Fields -|Field|Description| -|-|-| -|`source`
_string_| Name from extract data to use for the log entry. Required. | -### PackStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -PackStageSpec is a transform stage that lets you embed extracted values and labels into the log line by packing the log line and labels inside of a JSON object. -#### Fields -|Field|Description| -|-|-| -|`labels`
_[]string_| Name from extracted data or line labels. Required. Labels provided here are automatically removed from output labels. | -|`ingestTimestamp`
_bool_| Whether the resulting log line should use any existing timestamp or use time.Now() when the line was created. Set to true when combining several log streams from different containers to avoid out-of-order errors. | -### PipelineStageSpec -(Appears on:[PodLogsSpec](#monitoring.grafana.com/v1alpha1.PodLogsSpec)) -PipelineStageSpec defines an individual pipeline stage. Each stage type is mutually exclusive and no more than one may be set per stage. More information on pipelines can be found in the Promtail documentation: https://grafana.com/docs/loki/latest/clients/promtail/pipelines/ -#### Fields -|Field|Description| -|-|-|
_[CRIStageSpec](#monitoring.grafana.com/v1alpha1.CRIStageSpec)_| CRI is a parsing stage that reads log lines using the standard CRI logging format. Supply cri: {} to enable. | -|`docker`
_[DockerStageSpec](#monitoring.grafana.com/v1alpha1.DockerStageSpec)_| Docker is a parsing stage that reads log lines using the standard Docker logging format. Supply docker: {} to enable. | -|`drop`
_[DropStageSpec](#monitoring.grafana.com/v1alpha1.DropStageSpec)_| Drop is a filtering stage that lets you drop certain logs. | -|`json`
_[JSONStageSpec](#monitoring.grafana.com/v1alpha1.JSONStageSpec)_| JSON is a parsing stage that reads the log line as JSON and accepts JMESPath expressions to extract data. Information on JMESPath: http://jmespath.org/ | -|`labelAllow`
_[]string_| LabelAllow is an action stage that only allows the provided labels to be included in the label set that is sent to Loki with the log entry. | -|`labelDrop`
_[]string_| LabelDrop is an action stage that drops labels from the label set that is sent to Loki with the log entry. | -|`labels`
_map[string]string_| Labels is an action stage that takes data from the extracted map and modifies the label set that is sent to Loki with the log entry. The key is REQUIRED and represents the name for the label that will be created. Value is optional and will be the name from extracted data to use for the value of the label. If the value is not provided, it defaults to match the key. | -|`limit`
_[LimitStageSpec](#monitoring.grafana.com/v1alpha1.LimitStageSpec)_| Limit is a rate-limiting stage that throttles logs based on several options. | -|`match`
_[MatchStageSpec](#monitoring.grafana.com/v1alpha1.MatchStageSpec)_| Match is a filtering stage that conditionally applies a set of stages or drop entries when a log entry matches a configurable LogQL stream selector and filter expressions. | -|`metrics`
_[map[string]github.com/grafana/agent/internal/static/operator/apis/monitoring/v1alpha1.MetricsStageSpec](#monitoring.grafana.com/v1alpha1.MetricsStageSpec)_| Metrics is an action stage that supports defining and updating metrics based on data from the extracted map. Created metrics are not pushed to Loki or Prometheus and are instead exposed via the /metrics endpoint of the Grafana Agent pod. The Grafana Agent Operator should be configured with a MetricsInstance that discovers the logging DaemonSet to collect metrics created by this stage. | -|`multiline`
_[MultilineStageSpec](#monitoring.grafana.com/v1alpha1.MultilineStageSpec)_| Multiline stage merges multiple lines into a multiline block before passing it on to the next stage in the pipeline. | -|`output`
_[OutputStageSpec](#monitoring.grafana.com/v1alpha1.OutputStageSpec)_| Output stage is an action stage that takes data from the extracted map and changes the log line that will be sent to Loki. | -|`pack`
_[PackStageSpec](#monitoring.grafana.com/v1alpha1.PackStageSpec)_| Pack is a transform stage that lets you embed extracted values and labels into the log line by packing the log line and labels inside of a JSON object. | -|`regex`
_[RegexStageSpec](#monitoring.grafana.com/v1alpha1.RegexStageSpec)_| Regex is a parsing stage that parses a log line using a regular expression. Named capture groups in the regex allow for adding data into the extracted map. | -|`replace`
_[ReplaceStageSpec](#monitoring.grafana.com/v1alpha1.ReplaceStageSpec)_| Replace is a parsing stage that parses a log line using a regular expression and replaces the log line. Named capture groups in the regex allow for adding data into the extracted map. | -|`template`
_[TemplateStageSpec](#monitoring.grafana.com/v1alpha1.TemplateStageSpec)_| Template is a transform stage that manipulates the values in the extracted map using Go's template syntax. | -|`tenant`
_[TenantStageSpec](#monitoring.grafana.com/v1alpha1.TenantStageSpec)_| Tenant is an action stage that sets the tenant ID for the log entry, picking it from a field in the extracted data map. If the field is missing, the default LogsClientSpec.tenantId is used. | -|`timestamp`
_[TimestampStageSpec](#monitoring.grafana.com/v1alpha1.TimestampStageSpec)_| Timestamp is an action stage that can change the timestamp of a log line before it is sent to Loki. If not present, the timestamp of a log line defaults to the time when the log line was read. | -### PodLogs -(Appears on:[LogsDeployment](#monitoring.grafana.com/v1alpha1.LogsDeployment)) -PodLogs defines how to collect logs for a pod. -#### Fields -|Field|Description| -|-|-| -|`metadata`
_[Kubernetes meta/v1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_| Refer to the Kubernetes API documentation for the fields of the `metadata` field. | -|`spec`
_[PodLogsSpec](#monitoring.grafana.com/v1alpha1.PodLogsSpec)_| Spec holds the specification of the desired behavior for the PodLogs. | -|`jobLabel`
_string_| The label to use to retrieve the job name from. | -|`podTargetLabels`
_[]string_| PodTargetLabels transfers labels on the Kubernetes Pod onto the target. | -|`selector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Selector to select Pod objects. Required. | -|`namespaceSelector`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.NamespaceSelector](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.NamespaceSelector)_| Selector to select which namespaces the Pod objects are discovered from. | -|`pipelineStages`
_[[]PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)_| Pipeline stages for this pod. Pipeline stages support transforming and filtering log lines. | -|`relabelings`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RelabelConfig)_| RelabelConfigs to apply to logs before delivering. Grafana Agent Operator automatically adds relabelings for a few standard Kubernetes fields and replaces original scrape job name with __tmp_logs_job_name. More info: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#relabel_configs | -### PodLogsSpec -(Appears on:[PodLogs](#monitoring.grafana.com/v1alpha1.PodLogs)) -PodLogsSpec defines how to collect logs for a pod. -#### Fields -|Field|Description| -|-|-| -|`jobLabel`
_string_| The label to use to retrieve the job name from. | -|`podTargetLabels`
_[]string_| PodTargetLabels transfers labels on the Kubernetes Pod onto the target. | -|`selector`
_[Kubernetes meta/v1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)_| Selector to select Pod objects. Required. | -|`namespaceSelector`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.NamespaceSelector](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.NamespaceSelector)_| Selector to select which namespaces the Pod objects are discovered from. | -|`pipelineStages`
_[[]PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)_| Pipeline stages for this pod. Pipeline stages support transforming and filtering log lines. | -|`relabelings`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RelabelConfig)_| RelabelConfigs to apply to logs before delivering. Grafana Agent Operator automatically adds relabelings for a few standard Kubernetes fields and replaces original scrape job name with __tmp_logs_job_name. More info: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#relabel_configs | -### QueueConfig -(Appears on:[RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)) -QueueConfig allows the tuning of remote_write queue_config parameters. -#### Fields -|Field|Description| -|-|-| -|`capacity`
_int_| Capacity is the number of samples to buffer per shard before samples start being dropped. | -|`minShards`
_int_| MinShards is the minimum number of shards, i.e., the amount of concurrency. | -|`maxShards`
_int_| MaxShards is the maximum number of shards, i.e., the amount of concurrency. | -|`maxSamplesPerSend`
_int_| MaxSamplesPerSend is the maximum number of samples per send. | -|`batchSendDeadline`
_string_| BatchSendDeadline is the maximum time a sample will wait in the buffer. | -|`maxRetries`
_int_| MaxRetries is the maximum number of times to retry a batch on recoverable errors. | -|`minBackoff`
_string_| MinBackoff is the initial retry delay. MinBackoff is doubled for every retry. | -|`maxBackoff`
_string_| MaxBackoff is the maximum retry delay. | -|`retryOnRateLimit`
_bool_| RetryOnRateLimit retries requests when encountering rate limits. | -### RegexStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -RegexStageSpec is a parsing stage that parses a log line using a regular expression. Named capture groups in the regex allow for adding data into the extracted map. -#### Fields -|Field|Description| -|-|-|
_string_| Name from extracted data to parse. If empty, defaults to using the log message. | -|`expression`
_string_| RE2 regular expression. Each capture group MUST be named. Required. | -### RemoteWriteSpec -(Appears on:[MetricsInstanceSpec](#monitoring.grafana.com/v1alpha1.MetricsInstanceSpec), [MetricsSubsystemSpec](#monitoring.grafana.com/v1alpha1.MetricsSubsystemSpec)) -RemoteWriteSpec defines the remote_write configuration for Prometheus. -#### Fields -|Field|Description| -|-|-| -|`name`
_string_| Name of the remote_write queue. Must be unique if specified. The name is used in metrics and logging in order to differentiate queues. | -|`url`
_string_| URL of the endpoint to send samples to. | -|`remoteTimeout`
_string_| RemoteTimeout is the timeout for requests to the remote_write endpoint. | -|`headers`
_map[string]string_| Headers is a set of custom HTTP headers to be sent along with each remote_write request. Be aware that any headers set by Grafana Agent itself can't be overwritten. | -|`writeRelabelConfigs`
_[[]github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RelabelConfig)_| WriteRelabelConfigs holds relabel_configs to relabel samples before they are sent to the remote_write endpoint. | -|`basicAuth`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.BasicAuth)_| BasicAuth for the URL. | -|`oauth2`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.OAuth2](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.OAuth2)_| OAuth2 for the URL. | -|`bearerToken`
_string_| BearerToken used for remote_write. | -|`bearerTokenFile`
_string_| BearerTokenFile used to read bearer token. | -|`sigv4`
_[SigV4Config](#monitoring.grafana.com/v1alpha1.SigV4Config)_| SigV4 configures SigV4-based authentication to the remote_write endpoint. SigV4-based authentication is used if SigV4 is defined, even with an empty object. | -|`tlsConfig`
_[github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.TLSConfig)_| TLSConfig to use for remote_write. | -|`proxyUrl`
_string_| ProxyURL to proxy requests through. Optional. | -|`queueConfig`
_[QueueConfig](#monitoring.grafana.com/v1alpha1.QueueConfig)_| QueueConfig allows tuning of the remote_write queue parameters. | -|`metadataConfig`
_[MetadataConfig](#monitoring.grafana.com/v1alpha1.MetadataConfig)_| MetadataConfig configures the sending of series metadata to remote storage. | -### ReplaceStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -ReplaceStageSpec is a parsing stage that parses a log line using a regular expression and replaces the log line. Named capture groups in the regex allow for adding data into the extracted map. -#### Fields -|Field|Description| -|-|-|
_string_| Name from extracted data to parse. If empty, defaults to using the log message. | -|`expression`
_string_| RE2 regular expression. Each capture group MUST be named. Required. | -|`replace`
_string_| Value to replace the captured group with. | -### SigV4Config -(Appears on:[RemoteWriteSpec](#monitoring.grafana.com/v1alpha1.RemoteWriteSpec)) -SigV4Config specifies configuration to perform SigV4 authentication. -#### Fields -|Field|Description| -|-|-| -|`region`
_string_| Region of the AWS endpoint. If blank, the region from the default credentials chain is used. | -|`accessKey`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| AccessKey holds the secret of the AWS API access key to use for signing. If not provided, the environment variable AWS_ACCESS_KEY_ID is used. | -|`secretKey`
_[Kubernetes core/v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core)_| SecretKey of the AWS API to use for signing. If blank, the environment variable AWS_SECRET_ACCESS_KEY is used. | -|`profile`
_string_| Profile is the named AWS profile to use for authentication. | -|`roleARN`
_string_| RoleARN is the AWS Role ARN to use for authentication, as an alternative for using the AWS API keys. | -### TemplateStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -TemplateStageSpec is a transform stage that manipulates the values in the extracted map using Go's template syntax. -#### Fields -|Field|Description| -|-|-| -|`source`
_string_| Name from extracted data to parse. Required. If empty, defaults to using the log message. | -|`template`
_string_| Go template string to use. Required. In addition to normal template functions, ToLower, ToUpper, Replace, Trim, TrimLeft, TrimRight, TrimPrefix, and TrimSpace are also available. | -### TenantStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -TenantStageSpec is an action stage that sets the tenant ID for the log entry, picking it from a field in the extracted data map. -#### Fields -|Field|Description| -|-|-|
_string_| Name from labels whose value should be set as tenant ID. Mutually exclusive with source and value. | -|`source`
_string_| Name from extracted data to use as the tenant ID. Mutually exclusive with label and value. | -|`value`
_string_| Value to use for the tenant ID. Useful when this stage is used within a conditional pipeline such as match. Mutually exclusive with label and source. | -### TimestampStageSpec -(Appears on:[PipelineStageSpec](#monitoring.grafana.com/v1alpha1.PipelineStageSpec)) -TimestampStageSpec is an action stage that can change the timestamp of a log line before it is sent to Loki. -#### Fields -|Field|Description| -|-|-|
_string_| Name from extracted data to use as the timestamp. Required. | -|`format`
_string_| Determines format of the time string. Required. Can be one of: ANSIC, UnixDate, RubyDate, RFC822, RFC822Z, RFC850, RFC1123, RFC1123Z, RFC3339, RFC3339Nano, Unix, UnixMs, UnixUs, UnixNs. | -|`fallbackFormats`
_[]string_| Fallback formats to try if format fails. | -|`location`
_string_| IANA Timezone Database string. | -|`actionOnFailure`
_string_| Action to take when the timestamp can't be extracted or parsed. Can be skip or fudge. Defaults to fudge. | diff --git a/docs/sources/operator/architecture.md b/docs/sources/operator/architecture.md deleted file mode 100644 index ba0b5c97fd..0000000000 --- a/docs/sources/operator/architecture.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/operator/architecture/ -- /docs/grafana-cloud/monitor-infrastructure/agent/operator/architecture/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/architecture/ -- /docs/grafana-cloud/send-data/agent/operator/architecture/ -canonical: https://grafana.com/docs/agent/latest/operator/architecture/ -description: Learn about Grafana Agent architecture -title: Architecture -weight: 300 ---- - -# Architecture - -Grafana Agent Operator works by watching for Kubernetes [custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) that specify how to collect telemetry data from your Kubernetes cluster and where to send it. Agent Operator manages corresponding Grafana Agent deployments in your cluster by watching for changes against the custom resources. - -Grafana Agent Operator works in two phases—it discovers a hierarchy of custom resources and it reconciles that hierarchy into a Grafana Agent deployment. - -## Custom resource hierarchy - -The root of the custom resource hierarchy is the `GrafanaAgent` resource—the primary resource Agent Operator looks for. `GrafanaAgent` is called the _root_ because it -discovers other sub-resources, `MetricsInstance` and `LogsInstance`. The `GrafanaAgent` resource endows them with Pod attributes defined in the GrafanaAgent specification, for example, Pod requests, limits, affinities, and tolerations, and defines the Grafana Agent image. You can only define Pod attributes at the `GrafanaAgent` level. They are propagated to MetricsInstance and LogsInstance Pods. - -The full hierarchy of custom resources is as follows: - -- `GrafanaAgent` - - `MetricsInstance` - - `PodMonitor` - - `Probe` - - `ServiceMonitor` - - `LogsInstance` - - `PodLogs` - -The following table describes these custom resources: - -| Custom resource | description | -|---|---| -| `GrafanaAgent` | Discovers one or more `MetricsInstance` and `LogsInstance` resources. | -| `MetricsInstance` | Defines where to ship collected metrics. This rolls out a Grafana Agent StatefulSet that will scrape and ship metrics to a `remote_write` endpoint. | -| `ServiceMonitor` | Collects [cAdvisor](https://github.com/google/cadvisor) and [kubelet metrics](https://github.com/kubernetes/kube-state-metrics). This configures the `MetricsInstance` / Agent StatefulSet | -| `LogsInstance` | Defines where to ship collected logs. This rolls out a Grafana Agent [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) that will tail log files on your cluster nodes. | -| `PodLogs` | Collects container logs from Kubernetes Pods. This configures the `LogsInstance` / Agent DaemonSet. | - -Most of the Grafana Agent Operator resources have the ability to reference a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) or a -[Secret](https://kubernetes.io/docs/concepts/configuration/secret/). All referenced ConfigMaps or Secrets are added into the resource -hierarchy. - -When a hierarchy is established, each item is watched for changes. 
Any changed -item causes a reconcile of the root `GrafanaAgent` resource, either -creating, modifying, or deleting the corresponding Grafana Agent deployment. - -A single resource can belong to multiple hierarchies. For example, if two -`GrafanaAgents` use the same Probe, modifying that Probe causes both -`GrafanaAgents` to be reconciled. - -To set up monitoring, Grafana Agent Operator works in the following two phases: - -- Builds (discovers) a hierarchy of custom resources. -- Reconciles that hierarchy into a Grafana Agent deployment. - -Agent Operator also performs [sharding and replication](#sharding-and-replication) and adds [labels](#added-labels) to every metric. - -## How Agent Operator builds the custom resource hierarchy - -Grafana Agent Operator builds the hierarchy using label matching on the custom resources. The `GrafanaAgent` picks up the `MetricsInstance` -and `LogsInstance` that match the label `instance: primary`. The instances pick up the resources the same way.
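A minimal sketch of that label matching, with illustrative resource names and an illustrative `instance: primary` label (the `instanceSelector` and `serviceMonitorSelector` fields are documented in the API reference above):

```yaml
apiVersion: monitoring.grafana.com/v1alpha1
kind: GrafanaAgent
metadata:
  name: grafana-agent
spec:
  metrics:
    # Picks up MetricsInstances labeled instance: primary.
    instanceSelector:
      matchLabels:
        instance: primary
---
apiVersion: monitoring.grafana.com/v1alpha1
kind: MetricsInstance
metadata:
  name: primary
  labels:
    instance: primary # Matched by the GrafanaAgent above.
spec:
  # The MetricsInstance selects ServiceMonitors the same way.
  serviceMonitorSelector:
    matchLabels:
      instance: primary
```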
-### To validate the Secrets - -The generated configurations are saved in Secrets. To download and -validate them manually, use the following commands, where `<agent-name>` is the name of your `GrafanaAgent` resource: - -``` -$ kubectl get secrets <agent-name>-logs-config -o json | jq -r '.data."agent.yml"' | base64 --decode -$ kubectl get secrets <agent-name>-config -o json | jq -r '.data."agent.yml"' | base64 --decode -``` - -## How Agent Operator reconciles the custom resource hierarchy - -When a resource hierarchy is created, updated, or deleted, a reconcile occurs. -When a `GrafanaAgent` resource is deleted, the corresponding Grafana Agent -deployment is also deleted. - -Reconciling creates the following cluster resources: - -1. A Secret that holds the Grafana Agent - [configuration]({{< relref "../static/configuration/_index.md" >}}). -2. A Secret that holds all referenced Secrets or ConfigMaps from - the resource hierarchy. This ensures that Secrets referenced from a custom - resource in another namespace can still be read. -3. A Service to govern the StatefulSets that are generated. -4. One StatefulSet per Prometheus shard. - -PodMonitors, Probes, and ServiceMonitors are turned into individual scrape jobs -which all use Kubernetes Service Discovery (SD). - -## Sharding and replication - -The GrafanaAgent resource can specify a number of shards. Each shard results in -the creation of a StatefulSet with a hashmod + keep relabel_config per job: - -```yaml -- source_labels: [__address__] - target_label: __tmp_hash - modulus: NUM_SHARDS - action: hashmod -- source_labels: [__tmp_hash] - regex: CURRENT_STATEFULSET_SHARD - action: keep -``` - -This allows for horizontal scaling, where each shard -handles roughly 1/N of the total scrape load. Note that this does not use -consistent hashing, so changing the number of shards causes -anywhere from 1/N of the targets to all of them to reshuffle. - -The sharding mechanism is borrowed from the Prometheus Operator. - -The number of replicas can be defined, similarly to the number of shards. This -creates duplicate shards, which must be paired with a `remote_write` system that -can perform HA deduplication. [Grafana Cloud](/docs/grafana-cloud/) and [Mimir](/docs/mimir/latest/) provide this out of the -box, and the Grafana Agent Operator defaults support these two systems. - -The total number of created metrics pods is the product of `numShards * -numReplicas`. - -## Added labels - -Two labels are added by default to every metric: - -- `cluster`, representing the `GrafanaAgent` deployment. Holds the value of - `<namespace>/<name>`. -- `__replica__`, representing the replica number of the Agent. This label works - out of the box with Grafana Cloud and Cortex's [HA - deduplication](https://cortexmetrics.io/docs/guides/ha-pair-handling/). - -The shard number is not added as a label, as sharding is designed to be -transparent on the receiver end. - -## Enable sharding and replication - -To enable sharding and replication, you must set the `shards` and `replicas` properties in the Grafana Agent configuration file. For example, the following configuration file would shard the data into three shards and replicate each shard to two other Grafana Agent instances: - -``` -shards: 3 -replicas: 2 -``` - -You can also enable sharding and replication by setting the `shards` and `replicas` arguments when you start the Grafana Agent. - -### Examples - -The following examples show you how to enable sharding and replication in a Kubernetes environment.
-
-* To shard the data into three shards and run two replicas of each shard, you would use the following deployment manifest. Because the total number of Pods is the product of shards and replicas, it requests `3 * 2 = 6` replicas:
-
-  ```yaml
-  apiVersion: apps/v1
-  kind: Deployment
-  metadata:
-    name: grafana-agent
-  spec:
-    replicas: 6
-    selector:
-      matchLabels:
-        app: grafana-agent
-    template:
-      metadata:
-        labels:
-          app: grafana-agent
-      spec:
-        containers:
-        - name: grafana-agent
-          image: grafana/agent:latest
-          args:
-          - "--shards=3"
-          - "--replicas=2"
-  ```
-
-* To shard the data into 10 shards and run three replicas of each shard, you would use the following deployment manifest, which requests `10 * 3 = 30` replicas:
-
-  ```yaml
-  apiVersion: apps/v1
-  kind: Deployment
-  metadata:
-    name: grafana-agent
-  spec:
-    replicas: 30
-    selector:
-      matchLabels:
-        app: grafana-agent
-    template:
-      metadata:
-        labels:
-          app: grafana-agent
-      spec:
-        containers:
-        - name: grafana-agent
-          image: grafana/agent:latest
-          args:
-          - "--shards=10"
-          - "--replicas=3"
-  ```
-
diff --git a/docs/sources/operator/deploy-agent-operator-resources.md b/docs/sources/operator/deploy-agent-operator-resources.md
deleted file mode 100644
index 6b6f6564c8..0000000000
--- a/docs/sources/operator/deploy-agent-operator-resources.md
+++ /dev/null
@@ -1,435 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/operator/deploy-agent-operator-resources/
-- /docs/grafana-cloud/monitor-infrastructure/agent/operator/deploy-agent-operator-resources/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/deploy-agent-operator-resources/
-- /docs/grafana-cloud/send-data/agent/operator/deploy-agent-operator-resources/
-- custom-resource-quickstart/
-canonical: https://grafana.com/docs/agent/latest/operator/deploy-agent-operator-resources/
-description: Learn how to deploy Operator resources
-title: Deploy Operator resources
-weight: 120
----
-# Deploy Operator resources
-
-To start collecting telemetry data, you need to roll out Grafana Agent Operator custom resources into your Kubernetes cluster. Before you can create the custom resources, you must first apply the Agent Custom Resource Definitions (CRDs) and install Agent Operator, with or without Helm. If you haven't yet taken these steps, follow the instructions in one of the following topics:
-
-- [Install Agent Operator]({{< relref "./getting-started" >}})
-- [Install Agent Operator with Helm]({{< relref "./helm-getting-started" >}})
-
-Follow the steps in this guide to roll out the Grafana Agent Operator custom resources to:
-
-- Scrape and ship cAdvisor and kubelet metrics to a Prometheus-compatible metrics endpoint.
-- Collect and ship your Pods’ container logs to a Loki-compatible logs endpoint.
-
-The hierarchy of custom resources is as follows:
-
-- `GrafanaAgent`
-  - `MetricsInstance`
-    - `PodMonitor`
-    - `Probe`
-    - `ServiceMonitor`
-  - `LogsInstance`
-    - `PodLogs`
-
-To learn more about the custom resources Agent Operator provides and their hierarchy, see [Grafana Agent Operator architecture]({{< relref "./architecture" >}}).
-
-{{< admonition type="note" >}}
-Agent Operator is currently in [beta]({{< relref "../stability.md#beta" >}}) and its custom resources are subject to change.
-{{< /admonition >}}
-
-## Before you begin
-
-Before you begin, make sure that you have deployed the Grafana Agent Operator CRDs and installed Agent Operator into your cluster. See [Install Grafana Agent Operator with Helm]({{< relref "./helm-getting-started" >}}) or [Install Grafana Agent Operator]({{< relref "./getting-started" >}}) for instructions.
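-
-If you want to double-check that the CRDs are in place before you continue, you can list them by their API group. This is an optional sanity check; the exact set of CRDs depends on your Operator version:
-
-```
-kubectl get crds | grep monitoring.grafana.com
-```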
-
-## Deploy the GrafanaAgent resource
-
-In this section, you'll roll out a `GrafanaAgent` resource. See [Grafana Agent Operator architecture]({{< relref "./architecture" >}}) for a discussion of the resources in the `GrafanaAgent` resource hierarchy.
-
-{{< admonition type="note" >}}
-Due to the variety of possible deployment architectures, the official Agent Operator Helm chart does not provide built-in templates for the custom resources described in this guide. You must configure and deploy these manually as described in this section. We recommend templating and adding the following manifests to your own in-house Helm charts and GitOps flows.
-{{< /admonition >}}
-
-To deploy the `GrafanaAgent` resource:
-
-1. Copy the following manifests to a file:
-
-    ```yaml
-    apiVersion: monitoring.grafana.com/v1alpha1
-    kind: GrafanaAgent
-    metadata:
-      name: grafana-agent
-      namespace: default
-      labels:
-        app: grafana-agent
-    spec:
-      image: grafana/agent:{{< param "AGENT_RELEASE" >}}
-      integrations:
-        selector:
-          matchLabels:
-            agent: grafana-agent-integrations
-      logLevel: info
-      serviceAccountName: grafana-agent
-      metrics:
-        instanceSelector:
-          matchLabels:
-            agent: grafana-agent-metrics
-        externalLabels:
-          cluster: cloud
-
-      logs:
-        instanceSelector:
-          matchLabels:
-            agent: grafana-agent-logs
-
-    ---
-
-    apiVersion: v1
-    kind: ServiceAccount
-    metadata:
-      name: grafana-agent
-      namespace: default
-
-    ---
-
-    apiVersion: rbac.authorization.k8s.io/v1
-    kind: ClusterRole
-    metadata:
-      name: grafana-agent
-    rules:
-    - apiGroups:
-      - ""
-      resources:
-      - nodes
-      - nodes/proxy
-      - nodes/metrics
-      - services
-      - endpoints
-      - pods
-      - events
-      verbs:
-      - get
-      - list
-      - watch
-    - apiGroups:
-      - networking.k8s.io
-      resources:
-      - ingresses
-      verbs:
-      - get
-      - list
-      - watch
-    - nonResourceURLs:
-      - /metrics
-      - /metrics/cadvisor
-      verbs:
-      - get
-
-    ---
-
-    apiVersion: rbac.authorization.k8s.io/v1
-    kind: ClusterRoleBinding
-    metadata:
-      name: grafana-agent
-    roleRef:
-      apiGroup: rbac.authorization.k8s.io
-      kind: ClusterRole
-      name: grafana-agent
-    subjects:
-    - kind: ServiceAccount
-      name: grafana-agent
-      namespace: default
-    ```
-
-    In the first manifest, the `GrafanaAgent` resource:
-
-    - Specifies an Agent image version.
-    - Specifies `MetricsInstance` and `LogsInstance` selectors. These search for `MetricsInstances` and `LogsInstances` in the same namespace with labels matching `agent: grafana-agent-metrics` and `agent: grafana-agent-logs`, respectively.
-    - Sets a `cluster: cloud` label for all metrics shipped to your Prometheus-compatible endpoint. Change this label to your cluster name.
-
-    To search for `MetricsInstances` or `LogsInstances` in a *different* namespace, use the `instanceNamespaceSelector` field. To learn more about this field, see the `GrafanaAgent` [CRD specification](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml).
-
-1. Customize the manifests as needed and roll them out to your cluster using `kubectl apply -f` followed by the filename.
-
-    This step creates a `ServiceAccount`, `ClusterRole`, and `ClusterRoleBinding` for the `GrafanaAgent` resource.
-
-    Deploying a `GrafanaAgent` resource on its own does not spin up Agent Pods. Agent Operator creates Agent Pods once `MetricsInstance` and `LogsInstance` resources have been created.
Follow the instructions in the [Deploy a MetricsInstance resource](#deploy-a-metricsinstance-resource) and [Deploy LogsInstance and PodLogs resources](#deploy-logsinstance-and-podlogs-resources) sections to create these resources.
-
-### Disable feature flags reporting
-
-To disable [reporting]({{< relref "../static/configuration/flags.md#report-information-usage" >}}) of feature flag usage to Grafana, set the `disableReporting` field to `true`.
-
-### Disable support bundle generation
-
-To disable the [support bundles functionality]({{< relref "../static/configuration/flags.md#support-bundles" >}}), set the `disableSupportBundle` field to `true`.
-
-## Deploy a MetricsInstance resource
-
-Next, you'll roll out a `MetricsInstance` resource. `MetricsInstance` resources define a `remote_write` sink for metrics and configure one or more selectors to watch for creation and updates to `*Monitor` objects. These objects allow you to define Agent scrape targets via Kubernetes manifests:
-
-- [ServiceMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#servicemonitor)
-- [PodMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#podmonitor)
-- [Probes](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#probe)
-
-To deploy a `MetricsInstance` resource:
-
-1. Copy the following manifest to a file:
-
-    ```yaml
-    apiVersion: monitoring.grafana.com/v1alpha1
-    kind: MetricsInstance
-    metadata:
-      name: primary
-      namespace: default
-      labels:
-        agent: grafana-agent-metrics
-    spec:
-      remoteWrite:
-      - url: your_remote_write_URL
-        basicAuth:
-          username:
-            name: primary-credentials-metrics
-            key: username
-          password:
-            name: primary-credentials-metrics
-            key: password
-
-      # Supply an empty namespace selector to look in all namespaces. Remove
-      # this to only look in the same namespace as the MetricsInstance CR.
-      serviceMonitorNamespaceSelector: {}
-      serviceMonitorSelector:
-        matchLabels:
-          instance: primary
-
-      # Supply an empty namespace selector to look in all namespaces. Remove
-      # this to only look in the same namespace as the MetricsInstance CR.
-      podMonitorNamespaceSelector: {}
-      podMonitorSelector:
-        matchLabels:
-          instance: primary
-
-      # Supply an empty namespace selector to look in all namespaces. Remove
-      # this to only look in the same namespace as the MetricsInstance CR.
-      probeNamespaceSelector: {}
-      probeSelector:
-        matchLabels:
-          instance: primary
-    ```
-
-1. Replace the `remote_write` URL and customize the namespace and label configuration as necessary.
-
-    This step associates the `MetricsInstance` resource, through its `agent: grafana-agent-metrics` label, with the `GrafanaAgent` resource deployed in the previous step. The `MetricsInstance` resource watches for creation and updates to `*Monitors` with the `instance: primary` label.
-
-1. Once you've rolled out the manifest, create the `basicAuth` credentials [using a Kubernetes Secret](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-config-file/):
-
-    ```yaml
-    apiVersion: v1
-    kind: Secret
-    metadata:
-      name: primary-credentials-metrics
-      namespace: default
-    stringData:
-      username: 'your_cloud_prometheus_username'
-      password: 'your_cloud_prometheus_API_key'
-    ```
-
-If you're using Grafana Cloud, you can find your hosted Prometheus endpoint username and password by clicking **Details** on the Prometheus tile on the [Grafana Cloud Portal](/profile/org).
If you want to base64-encode these values yourself, use `data` instead of `stringData`. - -Once you've rolled out the `MetricsInstance` and its Secret, you can confirm that the `MetricsInstance` Agent is up and running using `kubectl get pod`. Since you haven't defined any monitors yet, this Agent doesn't have any scrape targets defined. In the next section, you'll create scrape targets for the cAdvisor and kubelet endpoints exposed by the `kubelet` service in the cluster. - -## Create ServiceMonitors for kubelet and cAdvisor endpoints - -Next, you'll create ServiceMonitors for kubelet and cAdvisor metrics exposed by the `kubelet` service. Every Node in your cluster exposes kubelet and cAdvisor metrics at `/metrics` and `/metrics/cadvisor`, respectively. Agent Operator creates a `kubelet` service that exposes these Node endpoints so that they can be scraped using ServiceMonitors. - -To scrape the kubelet and cAdvisor endpoints: - -1. Copy the following kubelet ServiceMonitor manifest to a file, then roll it out in your cluster using `kubectl apply -f` followed by the filename. - - ```yaml - apiVersion: monitoring.coreos.com/v1 - kind: ServiceMonitor - metadata: - labels: - instance: primary - name: kubelet-monitor - namespace: default - spec: - endpoints: - - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token - honorLabels: true - interval: 60s - metricRelabelings: - - action: keep - regex: kubelet_cgroup_manager_duration_seconds_count|go_goroutines|kubelet_pod_start_duration_seconds_count|kubelet_runtime_operations_total|kubelet_pleg_relist_duration_seconds_bucket|volume_manager_total_volumes|kubelet_volume_stats_capacity_bytes|container_cpu_usage_seconds_total|container_network_transmit_bytes_total|kubelet_runtime_operations_errors_total|container_network_receive_bytes_total|container_memory_swap|container_network_receive_packets_total|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|kubelet_running_pod_count|node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate|container_memory_working_set_bytes|storage_operation_errors_total|kubelet_pleg_relist_duration_seconds_count|kubelet_running_pods|rest_client_request_duration_seconds_bucket|process_resident_memory_bytes|storage_operation_duration_seconds_count|kubelet_running_containers|kubelet_runtime_operations_duration_seconds_bucket|kubelet_node_config_error|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_running_container_count|kubelet_volume_stats_available_bytes|kubelet_volume_stats_inodes|container_memory_rss|kubelet_pod_worker_duration_seconds_count|kubelet_node_name|kubelet_pleg_relist_interval_seconds_bucket|container_network_receive_packets_dropped_total|kubelet_pod_worker_duration_seconds_bucket|container_start_time_seconds|container_network_transmit_packets_dropped_total|process_cpu_seconds_total|storage_operation_duration_seconds_bucket|container_memory_cache|container_network_transmit_packets_total|kubelet_volume_stats_inodes_used|up|rest_client_requests_total - sourceLabels: - - __name__ - port: https-metrics - relabelings: - - sourceLabels: - - __metrics_path__ - targetLabel: metrics_path - - action: replace - targetLabel: job - replacement: integrations/kubernetes/kubelet - scheme: https - tlsConfig: - insecureSkipVerify: true - namespaceSelector: - matchNames: - - default - selector: - matchLabels: - app.kubernetes.io/name: kubelet - ``` - -1. 
Copy the following cAdvisor ServiceMonitor manifest to a file, then roll it out in your cluster using `kubectl apply -f` followed by the filename.
-
-    ```yaml
-    apiVersion: monitoring.coreos.com/v1
-    kind: ServiceMonitor
-    metadata:
-      labels:
-        instance: primary
-      name: cadvisor-monitor
-      namespace: default
-    spec:
-      endpoints:
-      - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
-        honorLabels: true
-        honorTimestamps: false
-        interval: 60s
-        metricRelabelings:
-        - action: keep
-          regex: kubelet_cgroup_manager_duration_seconds_count|go_goroutines|kubelet_pod_start_duration_seconds_count|kubelet_runtime_operations_total|kubelet_pleg_relist_duration_seconds_bucket|volume_manager_total_volumes|kubelet_volume_stats_capacity_bytes|container_cpu_usage_seconds_total|container_network_transmit_bytes_total|kubelet_runtime_operations_errors_total|container_network_receive_bytes_total|container_memory_swap|container_network_receive_packets_total|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|kubelet_running_pod_count|node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate|container_memory_working_set_bytes|storage_operation_errors_total|kubelet_pleg_relist_duration_seconds_count|kubelet_running_pods|rest_client_request_duration_seconds_bucket|process_resident_memory_bytes|storage_operation_duration_seconds_count|kubelet_running_containers|kubelet_runtime_operations_duration_seconds_bucket|kubelet_node_config_error|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_running_container_count|kubelet_volume_stats_available_bytes|kubelet_volume_stats_inodes|container_memory_rss|kubelet_pod_worker_duration_seconds_count|kubelet_node_name|kubelet_pleg_relist_interval_seconds_bucket|container_network_receive_packets_dropped_total|kubelet_pod_worker_duration_seconds_bucket|container_start_time_seconds|container_network_transmit_packets_dropped_total|process_cpu_seconds_total|storage_operation_duration_seconds_bucket|container_memory_cache|container_network_transmit_packets_total|kubelet_volume_stats_inodes_used|up|rest_client_requests_total
-          sourceLabels:
-          - __name__
-        path: /metrics/cadvisor
-        port: https-metrics
-        relabelings:
-        - sourceLabels:
-          - __metrics_path__
-          targetLabel: metrics_path
-        - action: replace
-          targetLabel: job
-          replacement: integrations/kubernetes/cadvisor
-        scheme: https
-        tlsConfig:
-          insecureSkipVerify: true
-      namespaceSelector:
-        matchNames:
-        - default
-      selector:
-        matchLabels:
-          app.kubernetes.io/name: kubelet
-    ```
-
-These two ServiceMonitors configure Agent to scrape all the kubelet and cAdvisor endpoints in your Kubernetes cluster (one of each per Node). They also set a `job` label, which you can update (it's preset here for compatibility with Grafana Cloud's Kubernetes integration), and define an allowlist containing a core set of Kubernetes metrics to reduce remote metrics usage. If you don't need this allowlist, you can omit it; however, your metrics usage will increase significantly.
-
-When you're done, Agent should now be shipping kubelet and cAdvisor metrics to your remote Prometheus endpoint. To check this in Grafana Cloud, go to your dashboards, select **Integration - Kubernetes**, then select **Kubernetes / Kubelet**.
-
-## Deploy LogsInstance and PodLogs resources
-
-Next, you'll deploy a `LogsInstance` resource to collect logs from your cluster Nodes and ship these to your remote Loki endpoint.
Agent Operator deploys a DaemonSet of Agents in your cluster that will tail log files defined in `PodLogs` resources.
-
-To deploy the `LogsInstance` resource into your cluster:
-
-1. Copy the following manifest to a file, then roll it out in your cluster using `kubectl apply -f` followed by the filename.
-
-    ```yaml
-    apiVersion: monitoring.grafana.com/v1alpha1
-    kind: LogsInstance
-    metadata:
-      name: primary
-      namespace: default
-      labels:
-        agent: grafana-agent-logs
-    spec:
-      clients:
-      - url: your_remote_logs_URL
-        basicAuth:
-          username:
-            name: primary-credentials-logs
-            key: username
-          password:
-            name: primary-credentials-logs
-            key: password
-
-      # Supply an empty namespace selector to look in all namespaces. Remove
-      # this to only look in the same namespace as the LogsInstance CR.
-      podLogsNamespaceSelector: {}
-      podLogsSelector:
-        matchLabels:
-          instance: primary
-    ```
-
-    This `LogsInstance` picks up `PodLogs` resources with the `instance: primary` label. Be sure to set the Loki URL to the correct push endpoint. For Grafana Cloud, this will look similar to `logs-prod-us-central1.grafana.net/loki/api/v1/push`; however, click **Details** on the Loki tile in the [Grafana Cloud Portal](/profile/org) to confirm the URL.
-
-    Also note that this example uses the `agent: grafana-agent-logs` label, which associates this `LogsInstance` with the `GrafanaAgent` resource defined earlier. This means that it will inherit requests, limits, affinities, and other properties defined in the `GrafanaAgent` custom resource.
-
-1. To create the Secret for the `LogsInstance` resource, copy the following Secret manifest to a file, then roll it out in your cluster using `kubectl apply -f` followed by the filename.
-
-    ```yaml
-    apiVersion: v1
-    kind: Secret
-    metadata:
-      name: primary-credentials-logs
-      namespace: default
-    stringData:
-      username: 'your_username_here'
-      password: 'your_password_here'
-    ```
-
-    If you're using Grafana Cloud, you can find your hosted Loki endpoint username and password by clicking **Details** on the Loki tile on the [Grafana Cloud Portal](/profile/org). If you want to base64-encode these values yourself, use `data` instead of `stringData`.
-
-1. Copy the following `PodLogs` manifest to a file, then roll it out to your cluster using `kubectl apply -f` followed by the filename. The manifest defines your logging targets. Agent Operator turns this into Agent configuration for the logs subsystem, and rolls it out to the DaemonSet of logging Agents.
-
-    {{< admonition type="note" >}}
-    The following is a minimal working example which you should adapt to your production needs.
-    {{< /admonition >}}
-
-    ```yaml
-    apiVersion: monitoring.grafana.com/v1alpha1
-    kind: PodLogs
-    metadata:
-      labels:
-        instance: primary
-      name: kubernetes-pods
-      namespace: default
-    spec:
-      pipelineStages:
-      - docker: {}
-      namespaceSelector:
-        matchNames:
-        - default
-      selector:
-        matchLabels: {}
-    ```
-
-    This example tails container logs for all Pods in the `default` namespace. You can restrict the set of matched Pods by using the `matchLabels` selector. You can also set additional `pipelineStages` and create `relabelings` to add or modify log line labels. To learn more about the `PodLogs` specification and available resource fields, see the [PodLogs CRD](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml).
-
-    The above `PodLogs` resource adds the following labels to log lines:
-
-    - `namespace`
-    - `service`
-    - `pod`
-    - `container`
-    - `job` (set to `PodLogs_namespace/PodLogs_name`)
-    - `__path__` (the path to log files, set to `/var/log/pods/*$1/*.log` where `$1` is `__meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name`)
-
-    To learn more about this configuration format and other available labels, see the [Promtail Scraping](/docs/loki/latest/clients/promtail/scraping/#promtail-scraping-service-discovery) documentation. Agent Operator loads this configuration into the `LogsInstance` agents automatically.
-
-The DaemonSet of logging agents should be tailing your container logs, applying default labels to the log lines, and shipping them to your remote Loki endpoint.
-
-## Summary
-
-You've now rolled out the following into your cluster:
-
-- A `GrafanaAgent` resource that discovers one or more `MetricsInstance` and `LogsInstance` resources.
-- A `MetricsInstance` resource that defines where to ship collected metrics.
-- A `ServiceMonitor` resource to collect cAdvisor and kubelet metrics.
-- A `LogsInstance` resource that defines where to ship collected logs.
-- A `PodLogs` resource to collect container logs from Kubernetes Pods.
-
-## What's next
-
-You can verify that everything is working correctly by navigating to your Grafana instance and querying your Loki and Prometheus data sources.
-
-> Tip: You can deploy multiple `GrafanaAgent` resources to give different Agent Pods their own resource allocations. By default, a single `GrafanaAgent` resource determines the container resources for all of the Agent Pods it deploys, but you might want different memory limits for metrics than for logs.
diff --git a/docs/sources/operator/getting-started.md b/docs/sources/operator/getting-started.md
deleted file mode 100644
index e739388087..0000000000
--- a/docs/sources/operator/getting-started.md
+++ /dev/null
@@ -1,156 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/operator/getting-started/
-- /docs/grafana-cloud/monitor-infrastructure/agent/operator/getting-started/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/getting-started/
-- /docs/grafana-cloud/send-data/agent/operator/getting-started/
-canonical: https://grafana.com/docs/agent/latest/operator/getting-started/
-description: Learn how to install the Operator
-title: Install the Operator
-weight: 110
----
-
-# Install the Operator
-
-In this guide, you'll learn how to deploy [Grafana Agent Operator]({{< relref "./_index.md" >}}) into your Kubernetes cluster. This guide does not use Helm. To learn how to deploy Agent Operator using the [grafana-agent-operator Helm chart](https://github.com/grafana/helm-charts/tree/main/charts/agent-operator), see [Install Grafana Agent Operator with Helm]({{< relref "./helm-getting-started.md" >}}).
-
-> **Note**: If you are shipping your data to Grafana Cloud, use [Kubernetes Monitoring](/docs/grafana-cloud/kubernetes-monitoring/) to set up Agent Operator. Kubernetes Monitoring provides a simplified approach and preconfigured dashboards and alerts.
-
-## Before you begin
-
-To deploy Agent Operator, make sure that you have the following:
-
-- A Kubernetes cluster
-- The `kubectl` command-line client installed and configured on your machine
-
-> **Note:** Agent Operator is currently in beta and its custom resources are subject to change.
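-
-If you work with multiple clusters, it's also worth confirming that `kubectl` points at the cluster you intend to deploy to before continuing. For example (a generic sanity check, not specific to Agent Operator):
-
-```
-kubectl config current-context
-```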
-
-## Deploy the Agent Operator Custom Resource Definitions (CRDs)
-
-Before you can create the custom resources for a Grafana Agent deployment,
-you need to deploy the
-[Custom Resource Definitions](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)
-to the cluster. These definitions describe the schema that the custom
-resources will conform to. This is also required for Grafana Agent Operator to run; it
-will fail if it can't find the Custom Resource Definitions of objects it is
-looking to use. To learn more about the custom resources Agent Operator provides and their hierarchy, see [Grafana Agent Operator architecture]({{< relref "./architecture" >}}).
-
-You can find the set of Custom Resource Definitions for Grafana Agent Operator in the Grafana Agent repository under
-[`operations/agent-static-operator/crds`](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds).
-
-To deploy the CRDs:
-
-1. Clone the agent repo and then apply the CRDs from the root of the agent repository:
-
-   ```
-   kubectl apply -f operations/agent-static-operator/crds
-   ```
-
-   This step _must_ be completed before installing Agent Operator. Agent Operator will fail to start if the CRDs do not exist.
-
-2. To check that the CRDs are deployed to your Kubernetes cluster and to access documentation for each resource, use `kubectl explain <resource>`.
-
-   For example, `kubectl explain GrafanaAgent` describes the GrafanaAgent CRD, and `kubectl explain GrafanaAgent.spec` gives you information on its spec field.
-
-## Install Grafana Agent Operator
-
-Next, install Agent Operator by applying the Agent Operator deployment schema.
-
-To install Agent Operator:
-
-1. Copy the following deployment schema to a file, updating the namespace if needed:
-
-   ```yaml
-   apiVersion: apps/v1
-   kind: Deployment
-   metadata:
-     name: grafana-agent-operator
-     namespace: default
-     labels:
-       app: grafana-agent-operator
-   spec:
-     replicas: 1
-     selector:
-       matchLabels:
-         app: grafana-agent-operator
-     template:
-       metadata:
-         labels:
-           app: grafana-agent-operator
-       spec:
-         serviceAccountName: grafana-agent-operator
-         containers:
-         - name: operator
-           image: grafana/agent-operator:{{< param "AGENT_RELEASE" >}}
-           args:
-           - --kubelet-service=default/kubelet
-
-   ---
-
-   apiVersion: v1
-   kind: ServiceAccount
-   metadata:
-     name: grafana-agent-operator
-     namespace: default
-
-   ---
-
-   apiVersion: rbac.authorization.k8s.io/v1
-   kind: ClusterRole
-   metadata:
-     name: grafana-agent-operator
-   rules:
-   - apiGroups: [monitoring.grafana.com]
-     resources:
-     - grafanaagents
-     - metricsinstances
-     - logsinstances
-     - podlogs
-     - integrations
-     verbs: [get, list, watch]
-   - apiGroups: [monitoring.coreos.com]
-     resources:
-     - podmonitors
-     - probes
-     - servicemonitors
-     verbs: [get, list, watch]
-   - apiGroups: [""]
-     resources:
-     - namespaces
-     - nodes
-     verbs: [get, list, watch]
-   - apiGroups: [""]
-     resources:
-     - secrets
-     - services
-     - configmaps
-     - endpoints
-     verbs: [get, list, watch, create, update, patch, delete]
-   - apiGroups: ["apps"]
-     resources:
-     - statefulsets
-     - daemonsets
-     - deployments
-     verbs: [get, list, watch, create, update, patch, delete]
-
-   ---
-
-   apiVersion: rbac.authorization.k8s.io/v1
-   kind: ClusterRoleBinding
-   metadata:
-     name: grafana-agent-operator
-   roleRef:
-     apiGroup: rbac.authorization.k8s.io
-     kind: ClusterRole
-     name: grafana-agent-operator
-   subjects:
-   - kind: ServiceAccount
-     name: grafana-agent-operator
-     namespace: default
-   ```
-
-2. Roll out the deployment in your cluster using `kubectl apply -f` followed by your deployment filename.
-
-> **Note**: If you want to run Agent Operator locally, make sure your kubectl context is correct. Running locally uses your current kubectl context. If it is set to your production environment, you could accidentally deploy a new Grafana Agent to production. Install CRDs on the cluster prior to running locally. Afterwards, you can run Agent Operator using `go run ./cmd/grafana-agent-operator`.
-
-## Deploy the Grafana Agent Operator resources
-
-Agent Operator is now up and running. Next, you need to deploy a Grafana Agent instance for Agent Operator to manage. To do so, follow the instructions in the [Deploy the Grafana Agent Operator resources]({{< relref "./deploy-agent-operator-resources" >}}) topic.
diff --git a/docs/sources/operator/helm-getting-started.md b/docs/sources/operator/helm-getting-started.md
deleted file mode 100644
index bb63f01190..0000000000
--- a/docs/sources/operator/helm-getting-started.md
+++ /dev/null
@@ -1,71 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/operator/helm-getting-started/
-- /docs/grafana-cloud/monitor-infrastructure/agent/operator/helm-getting-started/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/helm-getting-started/
-- /docs/grafana-cloud/send-data/agent/operator/helm-getting-started/
-canonical: https://grafana.com/docs/agent/latest/operator/helm-getting-started/
-description: Learn how to install the Operator with Helm charts
-title: Install the Operator with Helm
-weight: 100
----
-# Install the Operator with Helm
-
-In this guide, you'll learn how to deploy [Grafana Agent Operator]({{< relref "./_index.md" >}}) into your Kubernetes cluster using the [grafana-agent-operator Helm chart](https://github.com/grafana/helm-charts/tree/main/charts/agent-operator). To learn how to deploy Agent Operator without using Helm, see [Install Grafana Agent Operator]({{< relref "./getting-started.md" >}}).
-
-> **Note**: If you are shipping your data to Grafana Cloud, use [Kubernetes Monitoring](/docs/grafana-cloud/kubernetes-monitoring/) to set up Agent Operator. Kubernetes Monitoring provides a simplified approach and preconfigured dashboards and alerts.
-
-## Before you begin
-
-To deploy Agent Operator with Helm, make sure that you have the following:
-
-- A Kubernetes cluster
-- The [`kubectl`](https://kubernetes.io/docs/tasks/tools/#kubectl) command-line client installed and configured on your machine
-- The [`helm`](https://helm.sh/docs/intro/install/) command-line client installed and configured on your machine
-
-> **Note:** Agent Operator is currently in beta and its custom resources are subject to change.
-
-## Install the Agent Operator Helm chart
-
-In this section, you'll install the [grafana-agent-operator Helm chart](https://github.com/grafana/helm-charts/tree/main/charts/agent-operator) into your Kubernetes cluster. This will install the latest version of Agent Operator and its [Custom Resource Definitions](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds) (CRDs). The chart configures Operator to maintain a Service that lets you scrape kubelets using a `ServiceMonitor`.
-
-To install the Agent Operator Helm chart:
-
-1. Add and update the `grafana` Helm chart repo:
-
-   ```bash
-   helm repo add grafana https://grafana.github.io/helm-charts
-   helm repo update
-   ```
-1. Install the chart, replacing `my-release` with your release name:
-
-   ```bash
-   helm install my-release grafana/grafana-agent-operator
-   ```
-
-   If you want to modify the default parameters, you can create a `values.yaml` file and pass it to `helm install`:
-
-   ```bash
-   helm install my-release grafana/grafana-agent-operator -f values.yaml
-   ```
-
-   If you want to deploy Agent Operator into a namespace other than `default`, use the `-n` flag:
-
-   ```bash
-   helm install my-release grafana/grafana-agent-operator -f values.yaml -n my-namespace
-   ```
-
-   You can find a list of configurable template parameters in the [Helm chart repository](https://github.com/grafana/helm-charts/blob/main/charts/agent-operator/values.yaml).
-
-1. Once you've successfully deployed the Helm release, confirm that Agent Operator is up and running:
-
-   ```bash
-   kubectl get pod
-   kubectl get svc
-   ```
-
-   You should see an Agent Operator Pod in the `Running` state and a `kubelet` Service. Depending on your setup, this could take a moment.
-
-## Deploy the Grafana Agent Operator resources
-
-Agent Operator is now up and running. Next, you need to deploy a Grafana Agent instance for Agent Operator to manage. To do so, follow the instructions in the [Deploy the Grafana Agent Operator resources]({{< relref "./deploy-agent-operator-resources.md" >}}) topic. To learn more about the custom resources Agent Operator provides and their hierarchy, see [Grafana Agent Operator architecture]({{< relref "./architecture" >}}).
diff --git a/docs/sources/operator/hierarchy.dot b/docs/sources/operator/hierarchy.dot
deleted file mode 100644
index d17522aa17..0000000000
--- a/docs/sources/operator/hierarchy.dot
+++ /dev/null
@@ -1,71 +0,0 @@
-digraph G {
-    fontname="Courier New"
-    edge [fontname="Courier New"]
-    rankdir="TB"
-    edge [fontsize=10]
-
-    "GrafanaAgent" [
-        style = "filled, bold"
-        penwidth = 2
-        fillcolor = "white"
-        fontname = "Courier New"
-        shape = "Mrecord"
-        label =<
-            GrafanaAgent<br/>
-            app.kubernetes.io/name: grafana-agent-operator
-        >
-    ];
-    "MetricsInstance" [
-        style = "filled, bold"
-        penwidth = 2
-        fillcolor = "white"
-        fontname = "Courier New"
-        shape = "Mrecord"
-        label =<
-            MetricsInstance<br/>
-            helm.sh/chart: loki-3.2.2<br/>
-            instance: primary
-        >
-    ];
-    "LogsInstance" [
-        style = "filled, bold"
-        penwidth = 2
-        fillcolor = "white"
-        fontname = "Courier New"
-        shape = "Mrecord"
-        label =<
-            LogsInstance<br/>
-            helm.sh/chart: loki-3.2.2<br/>
-            instance: primary
-        >
-    ];
-    "ServiceMonitor" [
-        style = "filled, bold"
-        penwidth = 2
-        fillcolor = "white"
-        fontname = "Courier New"
-        shape = "Mrecord"
-        label =<
-            ServiceMonitor<br/>
-            helm.sh/chart: loki-3.2.2<br/>
-            instance: primary
-        >
-    ];
-    "Probe" [
-        style = "filled, bold"
-        penwidth = 2
-        fillcolor = "white"
-        fontname = "Courier New"
-        shape = "Mrecord"
-        label =<
-            Probe<br/>
-            helm.sh/chart: loki-3.2.2<br/>
-            instance: primary
-        >
-    ];
-    "PodLogs" [
-        style = "filled, bold"
-        penwidth = 2
-        fillcolor = "white"
-        fontname = "Courier New"
-        shape = "Mrecord"
-        label =<
-            PodLogs<br/>
-            helm.sh/chart: loki-3.2.2<br/>
-            instance: primary
-        >
-    ];
-
-    GrafanaAgent -> MetricsInstance [ label="matchLabels:\l instance: primary" ];
-    MetricsInstance -> Probe [ label="matchLabels:\l instance: primary" ];
-    MetricsInstance -> ServiceMonitor [ label="matchLabels:\l instance: primary" ];
-
-    GrafanaAgent -> LogsInstance [ label="matchLabels:\l instance: primary" ];
-    LogsInstance -> PodLogs [ label="matchLabels:\l instance: primary" ];
-
-}
diff --git a/docs/sources/operator/operator-integrations.md b/docs/sources/operator/operator-integrations.md
deleted file mode 100644
index fc49836f81..0000000000
--- a/docs/sources/operator/operator-integrations.md
+++ /dev/null
@@ -1,107 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/agent/operator/operator-integrations/
-- /docs/grafana-cloud/monitor-infrastructure/agent/operator/operator-integrations/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/operator-integrations/
-- /docs/grafana-cloud/send-data/agent/operator/operator-integrations/
-canonical: https://grafana.com/docs/agent/latest/operator/operator-integrations/
-description: Learn how to set up integrations
-title: Set up integrations
-weight: 350
----
-# Set up integrations
-
-This topic provides examples of setting up Grafana Agent Operator integrations, including [node_exporter](#set-up-an-agent-operator-node_exporter-integration) and [mysqld_exporter](#set-up-an-agent-operator-mysqld_exporter-integration).
-
-## Before you begin
-
-Before you begin, make sure that you have deployed the Grafana Agent Operator CRDs and installed Agent Operator into your cluster. See [Install Grafana Agent Operator with Helm]({{< relref "./helm-getting-started.md" >}}) or [Install Grafana Agent Operator]({{< relref "./getting-started.md" >}}) for instructions.
-
-Also, make sure that you [deploy the GrafanaAgent resource]({{< relref "./deploy-agent-operator-resources.md" >}}) and that the YAML you use includes the `integrations` definition under `spec`.
-
-**Important:** The field `name` under the `spec` section of the manifest must contain the name of the integration to install, according to the list of integrations defined [here]({{< relref "../static/configuration/integrations/integrations-next/_index.md#config-changes" >}}).
-
-**Important:** The value of the `metrics_instance` field needs to be in the format `<namespace>/<name>`, with namespace and name matching the values defined in the `metadata` section of the `MetricsInstance` resource, as explained in [deploy a MetricsInstance resource]({{< relref "./deploy-agent-operator-resources.md#deploy-a-metricsinstance-resource" >}}).
-
-## Set up an Agent Operator node_exporter integration
-
-The Agent Operator node_exporter integration lets you monitor your hardware and OS metrics from Unix-based machines, including Linux machines.
-
-To set up a node_exporter integration:
-
-1. 
Copy the following manifest to a file: - - ```yaml - apiVersion: monitoring.grafana.com/v1alpha1 - kind: Integration - metadata: - name: node-exporter - namespace: default - labels: - agent: grafana-agent-integrations - spec: - name: node_exporter - type: - allNodes: true - unique: true - config: - autoscrape: - enable: true - metrics_instance: default/primary - rootfs_path: /default/node_exporter/rootfs - sysfs_path: /default/node_exporter/sys - procfs_path: /default/node_exporter/proc - volumeMounts: - - mountPath: /default/node_exporter/proc - name: proc - - mountPath: /default/node_exporter/sys - name: sys - - mountPath: /default/node_exporter/rootfs - name: root - volumes: - - name: proc - hostPath: - path: /proc - - name: sys - hostPath: - path: /sys - - name: root - hostPath: - path: /root - ``` - -2. Customize the manifest as needed and roll it out to your cluster using `kubectl apply -f` followed by the filename. - - The manifest causes Agent Operator to create an instance of a grafana-agent-integrations-deploy resource that exports Node metrics. - -## Set up an Agent Operator mysqld_exporter integration - -The Agent Operator mysqld_exporter integration is an embedded version of mysqld_exporter that lets you collect metrics from MySQL servers. - -To set up a mysqld_exporter integration: - -1. Copy the following manifest to a file: - - ```yaml - apiVersion: monitoring.grafana.com/v1alpha1 - kind: Integration - metadata: - name: mysqld-exporter - namespace: default - labels: - agent: grafana-agent-integrations - spec: - name: mysql - type: - allNodes: true - unique: true - config: - autoscrape: - enable: true - metrics_instance: default/primary - data_source_name: root@(server-a:3306)/ - ``` - -2. Customize the manifest as needed and roll it out to your cluster using `kubectl apply -f` followed by the filename. - - The manifest causes Agent Operator to create an instance of a grafana-agent-integrations-deploy resource that exports MySQL metrics. diff --git a/docs/sources/operator/release-notes.md b/docs/sources/operator/release-notes.md deleted file mode 100644 index ec96084cd9..0000000000 --- a/docs/sources/operator/release-notes.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -aliases: -- ./upgrade-guide/ -- /docs/grafana-cloud/agent/operator/release-notes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/operator/release-notes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/operator/release-notes/ -- /docs/grafana-cloud/send-data/agent/operator/release-notes/ -canonical: https://grafana.com/docs/agent/latest/operator/release-notes/ -description: Release notes for Grafana Agent Operator -menuTitle: Release notes -title: Release notes for Grafana Agent Operator -weight: 999 ---- - -# Release notes for Grafana Agent Operator - -The release notes provide information about deprecations and breaking changes in Grafana Agent static mode Kubernetes operator. - -For a complete list of changes to Grafana Agent, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md). - -> **Note:** These release notes are specific to the Static mode Kubernetes Operator. 
-> Other release notes for the different Grafana Agent variants are contained on separate pages:
->
-> - [Static mode release notes][release-notes-static]
-> - [Flow mode release notes][release-notes-flow]
-
-{{% docs/reference %}}
-[release-notes-static]: "/docs/agent/ -> /docs/agent/<AGENT_VERSION>/static/release-notes"
-[release-notes-static]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/release-notes"
-
-[release-notes-flow]: "/docs/agent/ -> /docs/agent/<AGENT_VERSION>/flow/release-notes"
-[release-notes-flow]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/release-notes"
-{{% /docs/reference %}}
-
-## v0.33
-
-### Symbolic links in Docker containers removed
-
-We've removed the deprecated symbolic links to `/bin/agent*` in Docker
-containers, as planned in v0.31. If you set a custom entrypoint,
-use the new binaries that are prefixed with `/bin/grafana*`.
-
-## v0.31
-
-### Breaking change: binary names are now prefixed with `grafana-`
-
-As first announced in v0.29, the `agent-operator` release binary name is now
-prefixed with `grafana-`:
-
-- `agent-operator` is now `grafana-agent-operator`.
-
-For the `grafana/agent-operator` Docker container, the entrypoint is now
-`/bin/grafana-agent-operator`. A symbolic link from `/bin/agent-operator` to
-the new binary has been added.
-
-Custom entrypoints must be updated to use the new binaries before the
-symbolic links are removed in v0.33.
-
-## v0.29
-
-### Deprecation: binary names will be prefixed with `grafana-` in v0.31.0
-
-The `agent-operator` binary name has been deprecated and will be renamed to
-`grafana-agent-operator` in the v0.31.0 release.
-
-As part of this change, the Docker container for the v0.31.0 release will
-include symbolic links from the old binary names to the new binary names.
-
-There is no action to take at this time.
-
-## v0.24
-
-### Breaking change: Grafana Agent Operator supported Agent versions
-
-The v0.24.0 release of Grafana Agent Operator can no longer deploy versions of
-Grafana Agent prior to v0.24.0.
-
-## v0.19
-
-### Rename of Prometheus to Metrics (Breaking change)
-
-As a part of the deprecation of "Prometheus," all Operator CRDs and fields with
-"Prometheus" in the name have changed to "Metrics."
-
-This includes:
-
-- The `PrometheusInstance` CRD is now `MetricsInstance` (referenced by
-  `metricsinstances` and not `metrics-instances` within ClusterRoles).
-- The `Prometheus` field of the `GrafanaAgent` resource is now `Metrics`.
-- `PrometheusExternalLabelName` is now `MetricsExternalLabelName`.
-
-This is a hard breaking change, and all fields must change accordingly for the
-operator to continue working.
-
-Note that old CRDs with the old hyphenated names must be deleted
-(`kubectl delete crds/{grafana-agents,prometheus-instances}`) for ClusterRoles
-to work correctly.
-
-To do a zero-downtime upgrade of the Operator when there is a breaking change,
-refer to the new `agentctl operator-detach` command: this iterates through
-all of your objects and removes any OwnerReferences to a CRD, allowing you to
-delete your Operator CRDs or CRs.
-
-### Rename of CRD paths (Breaking change)
-
-`prometheus-instances` and `grafana-agents` have been renamed to
-`metricsinstances` and `grafanaagents` respectively. This is to remain
-consistent with how Kubernetes names multi-word objects.
-
-As a result, you will need to update your ClusterRoles to change the path of
-resources.
-To do a zero-downtime upgrade of the Operator when there is a breaking change,
-refer to the new `agentctl operator-detach` command: this iterates through
-all of your objects and removes any OwnerReferences to a CRD, allowing you to
-delete your Operator CRDs or CRs.
-
-Example old ClusterRole:
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent-operator
-rules:
-- apiGroups: [monitoring.grafana.com]
-  resources:
-  - grafana-agents
-  - prometheus-instances
-  verbs: [get, list, watch]
-```
-
-Example new ClusterRole:
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: grafana-agent-operator
-rules:
-- apiGroups: [monitoring.grafana.com]
-  resources:
-  - grafanaagents
-  - metricsinstances
-  verbs: [get, list, watch]
-```
diff --git a/docs/sources/reference/_index.md b/docs/sources/reference/_index.md
new file mode 100644
index 0000000000..6845cd1220
--- /dev/null
+++ b/docs/sources/reference/_index.md
@@ -0,0 +1,15 @@
+---
+aliases:
+- ./reference/
+canonical: https://grafana.com/docs/alloy/latest/reference/
+description: The reference-level documentation for Grafana Alloy
+menuTitle: Reference
+title: Grafana Alloy Reference
+weight: 600
+---
+
+# {{% param "PRODUCT_NAME" %}} Reference
+
+This section provides reference-level documentation for the various parts of {{< param "PRODUCT_NAME" >}}:
+
+{{< section >}}
diff --git a/docs/sources/reference/cli/_index.md b/docs/sources/reference/cli/_index.md
new file mode 100644
index 0000000000..66e9c82b1d
--- /dev/null
+++ b/docs/sources/reference/cli/_index.md
@@ -0,0 +1,29 @@
+---
+aliases:
+- ./reference/cli/
+canonical: https://grafana.com/docs/alloy/latest/reference/cli/
+description: Learn about the Grafana Alloy command line interface
+menuTitle: Command-line interface
+title: The Grafana Alloy command-line interface
+weight: 100
+---
+
+# The {{% param "PRODUCT_ROOT_NAME" %}} command-line interface
+
+The `grafana-alloy` binary exposes a command-line interface with subcommands to perform various operations.
+
+The most common subcommand is [`run`][run], which accepts a configuration file and starts {{< param "PRODUCT_NAME" >}}.
+
+Available commands:
+
+* [`convert`][convert]: Convert a {{< param "PRODUCT_ROOT_NAME" >}} configuration file.
+* [`fmt`][fmt]: Format a {{< param "PRODUCT_NAME" >}} configuration file.
+* [`run`][run]: Start {{< param "PRODUCT_NAME" >}}, given a configuration file.
+* [`tools`][tools]: Read the WAL and provide statistical information.
+* `completion`: Generate shell completion for the `grafana-alloy` CLI.
+* `help`: Print help for supported commands.
+
+[run]: ./run/
+[fmt]: ./fmt/
+[convert]: ./convert/
+[tools]: ./tools/
diff --git a/docs/sources/reference/cli/convert.md b/docs/sources/reference/cli/convert.md
new file mode 100644
index 0000000000..1a8ccfc7b2
--- /dev/null
+++ b/docs/sources/reference/cli/convert.md
@@ -0,0 +1,107 @@
+---
+aliases:
+- ./reference/cli/convert/
+canonical: https://grafana.com/docs/alloy/latest/reference/cli/convert/
+description: Learn about the convert command
+labels:
+  stage: beta
+menuTitle: convert
+title: The convert command
+weight: 100
+---
+
+# The convert command
+
+The `convert` command converts a supported configuration format to {{< param "PRODUCT_NAME" >}} River format.
+
+## Usage
+
+Usage:
+
+* `AGENT_MODE=flow grafana-agent convert [<FLAG> ...] <FILE_NAME>`
+* `grafana-agent-flow convert [<FLAG> ...] <FILE_NAME>`
+
+  Replace the following:
+
+  * _`<FLAG>`_: One or more flags that define the input and output of the command.
+  * _`<FILE_NAME>`_: The {{< param "PRODUCT_ROOT_NAME" >}} configuration file.
+
+If the `FILE_NAME` argument isn't provided or if the `FILE_NAME` argument is equal to `-`, `convert` converts the contents of standard input.
+Otherwise, `convert` reads and converts the file from disk specified by the argument.
+
+There are several different flags available for the `convert` command. You can use the `--output` flag to write the contents of the converted configuration to a specified path.
+You can use the `--report` flag to generate a diagnostic report.
+The `--bypass-errors` flag allows you to bypass any [errors][] generated during the file conversion.
+
+The command fails if the source configuration is syntactically incorrect or if it can't be converted to {{< param "PRODUCT_NAME" >}} River format.
+
+The following flags are supported:
+
+* `--output`, `-o`: The filepath and filename where the output is written.
+* `--report`, `-r`: The filepath and filename where the report is written.
+* `--source-format`, `-f`: Required. The format of the source file. Supported formats: [prometheus][], [promtail][], [static][].
+* `--bypass-errors`, `-b`: Enable bypassing errors when converting.
+* `--extra-args`, `-e`: Extra arguments from the original format used by the converter.
+
+### Defaults
+
+{{< param "PRODUCT_NAME" >}} defaults are managed as follows:
+
+* If a provided source configuration value matches a {{< param "PRODUCT_NAME" >}} default value, the property is left off the output.
+* If a non-provided source configuration value default matches a {{< param "PRODUCT_NAME" >}} default value, the property is left off the output.
+* If a non-provided source configuration value default doesn't match a {{< param "PRODUCT_NAME" >}} default value, the default value is included in the output.
+
+### Errors
+
+Errors are defined as non-critical issues identified during the conversion where an output can still be generated.
+These can be bypassed using the `--bypass-errors` flag.
+
+### Prometheus
+
+Using the `--source-format=prometheus` flag will convert the source configuration from [Prometheus v2.45][] to a {{< param "PRODUCT_NAME" >}} configuration.
+
+This includes Prometheus features such as [scrape_config][], [relabel_config][], [metric_relabel_configs][], [remote_write][], and many supported `*_sd_configs`.
+Unsupported features in a source configuration result in [errors][].
+
+Refer to [Migrate from Prometheus to {{< param "PRODUCT_NAME" >}}][migrate prometheus] for a detailed migration guide.
+
+### Promtail
+
+Using the `--source-format=promtail` flag will convert the source configuration from [Promtail v2.8.x][] to a {{< param "PRODUCT_NAME" >}} configuration.
+
+Nearly all [Promtail features][] are supported and can be converted to {{< param "PRODUCT_NAME" >}} configuration.
+
+If you have unsupported features in a source configuration, you will receive [errors][] when you convert to a {{< param "PRODUCT_NAME" >}} configuration.
+The converter will also raise warnings for configuration options that may require your attention.
+
+Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}][migrate promtail] for a detailed migration guide.
+
+### Static
+
+Using the `--source-format=static` flag will convert the source configuration from a [Grafana Agent Static][] configuration to a {{< param "PRODUCT_NAME" >}} configuration.
+
+Include `--extra-args` for passing additional command line flags from the original format.
+For example, `--extra-args="-enable-features=integrations-next"` will convert a Grafana Agent Static [integrations-next][] configuration to a {{< param "PRODUCT_NAME" >}} configuration. +You can also expand environment variables with `--extra-args="-config.expand-env"`. +You can combine multiple command line flags with a space between each flag, for example `--extra-args="-enable-features=integrations-next -config.expand-env"`. + +If you have unsupported features in a Grafana Agent Static mode source configuration, you will receive [errors][] when you convert to a {{< param "PRODUCT_NAME" >}} configuration. +The converter will also raise warnings for configuration options that may require your attention. + +Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}][migrate static] for a detailed migration guide. + +[prometheus]: #prometheus +[promtail]: #promtail +[static]: #static +[errors]: #errors +[scrape_config]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config +[relabel_config]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#relabel_config +[metric_relabel_configs]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#metric_relabel_configs +[remote_write]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#remote_write +[migrate prometheus]: ../../../tasks/migrate/from-prometheus/ +[Promtail v2.8.x]: https://grafana.com/docs/loki/v2.8.x/clients/promtail/ +[Prometheus v2.45]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/ +[Promtail features]: https://grafana.com/docs/loki/v2.8.x/clients/promtail/configuration/ +[migrate promtail]: ../../../tasks/migrate/from-promtail/ +[Grafana Agent Static]: https://grafana.com/docs/agent/latest/static/ +[integrations-next]: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/ +[migrate static]: ../../../tasks/migrate/from-static/ diff --git a/docs/sources/reference/cli/fmt.md b/docs/sources/reference/cli/fmt.md new file mode 100644 index 0000000000..2163deb38c --- /dev/null +++ b/docs/sources/reference/cli/fmt.md @@ -0,0 +1,37 @@ +--- +aliases: +- ./reference/cli/fmt/ +canonical: https://grafana.com/docs/alloy/latest/reference/cli/fmt/ +description: Learn about the fmt command +menuTitle: fmt +title: The fmt command +weight: 200 +--- + +# The fmt command + +The `fmt` command formats a given {{< param "PRODUCT_NAME" >}} configuration file. + +## Usage + +Usage: + +* `AGENT_MODE=flow grafana-agent fmt [FLAG ...] FILE_NAME` +* `grafana-agent-flow fmt [FLAG ...] FILE_NAME` + + Replace the following: + + * `FLAG`: One or more flags that define the input and output of the command. + * `FILE_NAME`: The {{< param "PRODUCT_NAME" >}} configuration file. + +If the `FILE_NAME` argument isn't provided or if the `FILE_NAME` argument is equal to `-`, `fmt` formats the contents of standard input. +Otherwise, `fmt` reads and formats the file from disk specified by the argument. + +The `--write` flag can be specified to replace the contents of the original file on disk with the formatted results. +`--write` can only be provided when `fmt` isn't reading from standard input. + +The command fails if the file being formatted has syntactically incorrect River configuration, but doesn't validate whether {{< param "PRODUCT_NAME" >}} components are configured properly. 
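+
+For example, to format a configuration file in place (a sketch, assuming a `config.river` file in the current directory):
+
+```bash
+grafana-agent-flow fmt --write config.river
+```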
+
+The following flags are supported:
+
+* `--write`, `-w`: Write the formatted file back to disk when not reading from standard input.
diff --git a/docs/sources/flow/reference/cli/run.md b/docs/sources/reference/cli/run.md
similarity index 56%
rename from docs/sources/flow/reference/cli/run.md
rename to docs/sources/reference/cli/run.md
index 4da0df47a4..9cb201d2b2 100644
--- a/docs/sources/flow/reference/cli/run.md
+++ b/docs/sources/reference/cli/run.md
@@ -1,10 +1,7 @@
 ---
 aliases:
-- /docs/grafana-cloud/agent/flow/reference/cli/run/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/run/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/run/
-- /docs/grafana-cloud/send-data/agent/flow/reference/cli/run/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/run/
+- ./reference/cli/run/
+canonical: https://grafana.com/docs/alloy/latest/reference/cli/run/
 description: Learn about the run command
 menuTitle: run
 title: The run command
@@ -27,27 +24,21 @@ Usage:
 * `FLAG`: One or more flags that define the input and output of the command.
 * `PATH_NAME`: Required. The {{< param "PRODUCT_NAME" >}} configuration file/directory path.
 
-If the `PATH_NAME` argument is not provided, or if the configuration path can't be loaded or
-contains errors during the initial load, the `run` command will immediately exit and show an error message.
+If the `PATH_NAME` argument is not provided, or if the configuration path can't be loaded or contains errors during the initial load, the `run` command will immediately exit and show an error message.
 
-If you give the `PATH_NAME` argument a directory path, {{< param "PRODUCT_NAME" >}} will find `*.river` files
-(ignoring nested directories) and load them as a single configuration source. However, component names must
-be **unique** across all River files, and configuration blocks must not be repeated.
+If you give the `PATH_NAME` argument a directory path, {{< param "PRODUCT_NAME" >}} will find `*.river` files (ignoring nested directories) and load them as a single configuration source.
+However, component names must be **unique** across all River files, and configuration blocks must not be repeated.
 
-{{< param "PRODUCT_NAME" >}} will continue to run if subsequent reloads of the configuration
-file fail, potentially marking components as unhealthy depending on the nature
-of the failure. When this happens, {{< param "PRODUCT_NAME" >}} will continue functioning
-in the last valid state.
+{{< param "PRODUCT_NAME" >}} will continue to run if subsequent reloads of the configuration file fail, potentially marking components as unhealthy depending on the nature of the failure.
+When this happens, {{< param "PRODUCT_NAME" >}} will continue functioning in the last valid state.
 
-`run` launches an HTTP server that exposes metrics about itself and its
-components. The HTTP server is also exposes a UI at `/` for debugging
-running components.
+`run` launches an HTTP server that exposes metrics about itself and its components.
+The HTTP server also exposes a UI at `/` for debugging running components.
 
 The following flags are supported:
 
-* `--server.http.enable-pprof`: Enable /debug/pprof profiling endpoints. (default `true`)
-* `--server.http.memory-addr`: Address to listen for [in-memory HTTP traffic][] on
-  (default `agent.internal:12345`).
+* `--server.http.enable-pprof`: Enable /debug/pprof profiling endpoints (default `true`).
+* `--server.http.memory-addr`: Address to listen for [in-memory HTTP traffic][] on (default `agent.internal:12345`). * `--server.http.listen-addr`: Address to listen for HTTP traffic on (default `127.0.0.1:12345`). * `--server.http.ui-path-prefix`: Base path where the UI is exposed (default `/`). * `--storage.path`: Base directory where components can store data (default `data-agent/`). @@ -65,10 +56,6 @@ The following flags are supported: * `--config.bypass-conversion-errors`: Enable bypassing errors when converting (default `false`). * `--config.extra-args`: Extra arguments from the original format used by the converter. -[in-memory HTTP traffic]: {{< relref "../../concepts/component_controller.md#in-memory-traffic" >}} -[data collection]: {{< relref "../../../data-collection" >}} -[components]: {{< relref "../../concepts/components.md" >}} - ## Update the configuration file The configuration file can be reloaded from disk by either: @@ -76,74 +63,46 @@ The configuration file can be reloaded from disk by either: * Sending an HTTP POST request to the `/-/reload` endpoint. * Sending a `SIGHUP` signal to the {{< param "PRODUCT_NAME" >}} process. -When this happens, the [component controller][] synchronizes the set of running -components with the latest set of components specified in the configuration file. -Components that are no longer defined in the configuration file after reloading are -shut down, and components that have been added to the configuration file since the -previous reload are created. - -All components managed by the component controller are reevaluated after -reloading. +When this happens, the [component controller][] synchronizes the set of running components with the latest set of components specified in the configuration file. +Components that are no longer defined in the configuration file after reloading are shut down, and components that have been added to the configuration file since the previous reload are created. -[component controller]: {{< relref "../../concepts/component_controller.md" >}} +All components managed by the component controller are reevaluated after reloading. ## Clustering (beta) -The `--cluster.enabled` command-line argument starts {{< param "PRODUCT_ROOT_NAME" >}} in -[clustering][] mode. The rest of the `--cluster.*` command-line flags can be -used to configure how nodes discover and connect to one another. +The `--cluster.enabled` command-line argument starts {{< param "PRODUCT_ROOT_NAME" >}} in [clustering][] mode. +The rest of the `--cluster.*` command-line flags can be used to configure how nodes discover and connect to one another. -Each cluster member’s name must be unique within the cluster. Nodes which try -to join with a conflicting name are rejected and will fall back to -bootstrapping a new cluster of their own. +Each cluster member’s name must be unique within the cluster. +Nodes which try to join with a conflicting name are rejected and will fall back to bootstrapping a new cluster of their own. -Peers communicate over HTTP/2 on the built-in HTTP server. Each node -must be configured to accept connections on `--server.http.listen-addr` and the -address defined or inferred in `--cluster.advertise-address`. +Peers communicate over HTTP/2 on the built-in HTTP server. +Each node must be configured to accept connections on `--server.http.listen-addr` and the address defined or inferred in `--cluster.advertise-address`. 
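+
+For example, each of two nodes might be started with `--cluster.enabled --server.http.listen-addr=0.0.0.0:12345 --cluster.advertise-address=NODE_IP:12345`, where `NODE_IP` stands in for that node's reachable address and the advertised port matches the HTTP listener; the values are illustrative.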
-If the `--cluster.advertise-address` flag isn't explicitly set, {{< param "PRODUCT_NAME" >}}
-tries to infer a suitable one from `--cluster.advertise-interfaces`.
-If `--cluster.advertise-interfaces` isn't explicitly set, {{< param "PRODUCT_NAME" >}} will
-infer one from the `eth0` and `en0` local network interfaces.
+If the `--cluster.advertise-address` flag isn't explicitly set, {{< param "PRODUCT_NAME" >}} tries to infer a suitable one from `--cluster.advertise-interfaces`.
+If `--cluster.advertise-interfaces` isn't explicitly set, {{< param "PRODUCT_NAME" >}} will infer one from the `eth0` and `en0` local network interfaces.
 {{< param "PRODUCT_NAME" >}} will fail to start if it can't determine the advertised address.
 
-Since Windows doesn't use the interface names `eth0` or `en0`, Windows users must explicitly pass
-at least one valid network interface for `--cluster.advertise-interfaces` or a value for `--cluster.advertise-address`.
-
-The comma-separated list of addresses provided in `--cluster.join-addresses`
-can either be IP addresses with an optional port, or DNS records to lookup.
-The ports on the list of addresses default to the port used for the HTTP
-listener if not explicitly provided. We recommend that you
-align the port numbers on as many nodes as possible to simplify the deployment
-process.
-
-The `--cluster.discover-peers` command-line flag expects a list of tuples in
-the form of `provider=XXX key=val key=val ...`. Clustering uses the
-[go-discover] package to discover peers and fetch their IP addresses, based
-on the chosen provider and the filtering key-values it supports. Clustering
-supports the default set of providers available in go-discover and registers
-the `k8s` provider on top.
-
-If either the key or the value in a tuple pair contains a space, a backslash, or
-double quotes, then it must be quoted with double quotes. Within this quoted
-string, the backslash can be used to escape double quotes or the backslash
-itself.
-
-The `--cluster.rejoin-interval` flag defines how often each node should
-rediscover peers based on the contents of the `--cluster.join-addresses` and
-`--cluster.discover-peers` flags and try to rejoin them. This operation
-is useful for addressing split-brain issues if the initial bootstrap is
-unsuccessful and for making clustering easier to manage in dynamic
-environments. To disable this behavior, set the `--cluster.rejoin-interval`
-flag to `"0s"`.
-
-Discovering peers using the `--cluster.join-addresses` and
-`--cluster.discover-peers` flags only happens on startup; after that, cluster
-nodes depend on gossiping messages with each other to converge on the cluster's
-state.
-
-The first node that is used to bootstrap a new cluster (also known as
-the "seed node") can either omit the flags that specify peers to join or can
-try to connect to itself.
+Since Windows doesn't use the interface names `eth0` or `en0`, Windows users must explicitly pass at least one valid network interface for `--cluster.advertise-interfaces` or a value for `--cluster.advertise-address`.
+
+The comma-separated list of addresses provided in `--cluster.join-addresses` can either be IP addresses with an optional port, or DNS records to look up.
+The ports on the list of addresses default to the port used for the HTTP listener if not explicitly provided.
+We recommend that you align the port numbers on as many nodes as possible to simplify the deployment process.
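+
+For example, `--cluster.join-addresses=alloy-0.example.net,alloy-1.example.net:12345` (host names illustrative) mixes a DNS name that falls back to the HTTP listener port with an address that names its port explicitly.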
+ +The `--cluster.discover-peers` command-line flag expects a list of tuples in the form of `provider=XXX key=val key=val ...`. +Clustering uses the [go-discover] package to discover peers and fetch their IP addresses, based on the chosen provider and the filtering key-values it supports. +Clustering supports the default set of providers available in go-discover and registers the `k8s` provider on top. + +If either the key or the value in a tuple pair contains a space, a backslash, or double quotes, then it must be quoted with double quotes. +Within this quoted string, the backslash can be used to escape double quotes or the backslash itself. + +The `--cluster.rejoin-interval` flag defines how often each node should rediscover peers based on the contents of the `--cluster.join-addresses` and `--cluster.discover-peers` flags and try to rejoin them. +This operation is useful for addressing split-brain issues if the initial bootstrap is unsuccessful and for making clustering easier to manage in dynamic environments. +To disable this behavior, set the `--cluster.rejoin-interval` flag to `"0s"`. + +Discovering peers using the `--cluster.join-addresses` and `--cluster.discover-peers` flags only happens on startup. +After that, cluster nodes depend on gossiping messages with each other to converge on the cluster's state. + +The first node that is used to bootstrap a new cluster (also known as the "seed node") can either omit the flags that specify peers to join or can try to connect to itself. To join or rejoin a cluster, {{< param "PRODUCT_NAME" >}} will try to connect to a certain number of peers limited by the `--cluster.max-join-peers` flag. This flag can be useful for clusters of significant sizes because connecting to a high number of peers can be an expensive operation. @@ -160,26 +119,18 @@ Attempting to join a cluster with a wrong `--cluster.name` will result in a "fai Clustered {{< param "PRODUCT_ROOT_NAME" >}}s are in one of three states: * **Viewer**: {{< param "PRODUCT_NAME" >}} has a read-only view of the cluster and isn't participating in workload distribution. - * **Participant**: {{< param "PRODUCT_NAME" >}} is participating in workload distribution for components that have clustering enabled. - * **Terminating**: {{< param "PRODUCT_NAME" >}} is shutting down and will no longer assign new work to itself. -Each {{< param "PRODUCT_ROOT_NAME" >}} initially joins the cluster in the viewer state and then transitions to -the participant state after the process startup completes. Each {{< param "PRODUCT_ROOT_NAME" >}} then -transitions to the terminating state when shutting down. +Each {{< param "PRODUCT_ROOT_NAME" >}} initially joins the cluster in the viewer state and then transitions to the participant state after the process startup completes. +Each {{< param "PRODUCT_ROOT_NAME" >}} then transitions to the terminating state when shutting down. The current state of a clustered {{< param "PRODUCT_ROOT_NAME" >}} is shown on the clustering page in the [UI][]. -[UI]: {{< relref "../../tasks/debug.md#clustering-page" >}} - ## Configuration conversion (beta) -When you use the `--config.format` command-line argument with a value -other than `flow`, {{< param "PRODUCT_ROOT_NAME" >}} converts the configuration file from -the source format to River and immediately starts running with the new -configuration. This conversion uses the converter API described in the -[grafana-agent-flow convert][] docs. 
+When you use the `--config.format` command-line argument with a value other than `flow`, {{< param "PRODUCT_ROOT_NAME" >}} converts the configuration file from the source format to River and immediately starts running with the new configuration.
+This conversion uses the converter API described in the [grafana-alloy convert][] docs.
 
 If you include the `--config.bypass-conversion-errors` command-line argument,
 {{< param "PRODUCT_NAME" >}} will ignore any errors from the converter. Use this argument
@@ -189,6 +140,11 @@ original configuration.
 
-Include `--config.extra-args` to pass additional command line flags from the original format to the converter. Refer to [grafana-agent-flow convert][] for more details on how `extra-args` work.
+Include `--config.extra-args` to pass additional command line flags from the original format to the converter. Refer to [grafana-alloy convert][] for more details on how `extra-args` work.
 
-[grafana-agent-flow convert]: {{< relref "./convert.md" >}}
-[clustering]: {{< relref "../../concepts/clustering.md" >}}
+[grafana-alloy convert]: ../convert/
+[clustering]: ../../../concepts/clustering/
 [go-discover]: https://github.com/hashicorp/go-discover
+[in-memory HTTP traffic]: ../../../concepts/component_controller/#in-memory-traffic
+[data collection]: ../../../data-collection/
+[components]: ../../concepts/components/
+[component controller]: ../../../concepts/component_controller/
+[UI]: ../../tasks/debug/#clustering-page
diff --git a/docs/sources/flow/reference/cli/tools.md b/docs/sources/reference/cli/tools.md
similarity index 69%
rename from docs/sources/flow/reference/cli/tools.md
rename to docs/sources/reference/cli/tools.md
index b9fb73a761..2eb29895bb 100644
--- a/docs/sources/flow/reference/cli/tools.md
+++ b/docs/sources/reference/cli/tools.md
@@ -1,10 +1,7 @@
 ---
 aliases:
-- /docs/grafana-cloud/agent/flow/reference/cli/tools/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/cli/tools/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/cli/tools/
-- /docs/grafana-cloud/send-data/agent/flow/reference/cli/tools/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/cli/tools/
+- ./reference/cli/tools/
+canonical: https://grafana.com/docs/alloy/latest/reference/cli/tools/
 description: Learn about the tools command
 menuTitle: tools
 title: The tools command
@@ -13,11 +10,10 @@ weight: 400
 
 # The tools command
 
-The `tools` command contains command line tooling grouped by Flow component.
+The `tools` command contains command line tooling grouped by {{< param "PRODUCT_NAME" >}} component.
 
 {{< admonition type="caution" >}}
-Utilities in this command have no backward compatibility
-guarantees and may change or be removed between releases.
+Utilities in this command have no backward compatibility guarantees and may change or be removed between releases.
 {{< /admonition >}}
 
 ## Subcommands
 
@@ -29,8 +25,7 @@ Usage:
 
 * `AGENT_MODE=flow grafana-agent tools prometheus.remote_write sample-stats [FLAG ...] WAL_DIRECTORY`
 * `grafana-agent-flow tools prometheus.remote_write sample-stats [FLAG ...] WAL_DIRECTORY`
 
-The `sample-stats` command reads the Write-Ahead Log (WAL) specified by
-`WAL_DIRECTORY` and collects information on metric samples within it.
+The `sample-stats` command reads the Write-Ahead Log (WAL) specified by `WAL_DIRECTORY` and collects information on metric samples within it.
For each metric discovered, `sample-stats` emits: @@ -52,13 +47,9 @@ Usage: * `AGENT_MODE=flow grafana-agent tools prometheus.remote_write target-stats --job JOB --instance INSTANCE WAL_DIRECTORY` * `grafana-agent-flow tools prometheus.remote_write target-stats --job JOB --instance INSTANCE WAL_DIRECTORY` -The `target-stats` command reads the Write-Ahead Log (WAL) specified by -`WAL_DIRECTORY` and collects metric cardinality information for a specific -target. +The `target-stats` command reads the Write-Ahead Log (WAL) specified by `WAL_DIRECTORY` and collects metric cardinality information for a specific target. -For the target specified by the `--job` and `--instance` flags, unique metric -names for that target are printed along with the number of series with that -metric name. +For the target specified by the `--job` and `--instance` flags, unique metric names for that target are printed along with the number of series with that metric name. The following flags are supported: @@ -69,13 +60,12 @@ The `--job` and `--instance` labels are required. ### prometheus.remote_write wal-stats -Usage: +Usage: * `AGENT_MODE=flow grafana-agent tools prometheus.remote_write wal-stats WAL_DIRECTORY` * `grafana-agent-flow tools prometheus.remote_write wal-stats WAL_DIRECTORY` -The `wal-stats` command reads the Write-Ahead Log (WAL) specified by -`WAL_DIRECTORY` and collects general information about it. +The `wal-stats` command reads the Write-Ahead Log (WAL) specified by `WAL_DIRECTORY` and collects general information about it. The following information is reported: @@ -89,9 +79,7 @@ The following information is reported: * The oldest segment number in the WAL. * The newest segment number in the WAL. -Additionally, `wal-stats` reports per-target information, where a target is -defined as a unique combination of the `job` and `instance` label values. For -each target, `wal-stats` reports the number of series and the number of -metric samples associated with that target. +Additionally, `wal-stats` reports per-target information, where a target is defined as a unique combination of the `job` and `instance` label values. +For each target, `wal-stats` reports the number of series and the number of metric samples associated with that target. The `wal-stats` command does not support any flags. 
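+
+For example, `grafana-agent-flow tools prometheus.remote_write wal-stats data-agent/prometheus.remote_write.default/wal` (path illustrative) reports the series, sample, timestamp, and segment statistics listed above for that WAL directory.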
diff --git a/docs/sources/flow/reference/compatibility/_index.md b/docs/sources/reference/compatibility/_index.md
similarity index 97%
rename from docs/sources/flow/reference/compatibility/_index.md
rename to docs/sources/reference/compatibility/_index.md
index 61775bcf26..d3d3154f49 100644
--- a/docs/sources/flow/reference/compatibility/_index.md
+++ b/docs/sources/reference/compatibility/_index.md
@@ -1,11 +1,8 @@
 ---
 aliases:
-- /docs/grafana-cloud/agent/flow/reference/compatible-components/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/compatible-components/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/compatible-components/
-- /docs/grafana-cloud/send-data/agent/flow/reference/compatible-components/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/compatibility/
-description: Learn about which components are compatible with each other in Grafana Agent Flow
+- ./reference/compatibility/
+canonical: https://grafana.com/docs/alloy/latest/reference/compatibility/
+description: Learn about which components are compatible with each other in Grafana Alloy
 title: Compatible components
 weight: 400
 ---
diff --git a/docs/sources/reference/components/_index.md b/docs/sources/reference/components/_index.md
new file mode 100644
index 0000000000..53ba25bff4
--- /dev/null
+++ b/docs/sources/reference/components/_index.md
@@ -0,0 +1,14 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/reference/components/
+description: Learn about the components in Grafana Alloy
+title: Components reference
+weight: 300
+---
+
+# Components reference
+
+This section contains reference documentation for all recognized [components][].
+
+{{< section >}}
+
+[components]: ../../concepts/components/
diff --git a/docs/sources/flow/reference/components/discovery.azure.md b/docs/sources/reference/components/discovery.azure.md
similarity index 67%
rename from docs/sources/flow/reference/components/discovery.azure.md
rename to docs/sources/reference/components/discovery.azure.md
index 9970dc4fde..942932d59a 100644
--- a/docs/sources/flow/reference/components/discovery.azure.md
+++ b/docs/sources/reference/components/discovery.azure.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.azure/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.azure/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.azure/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.azure/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.azure/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.azure/
 description: Learn about discovery.azure
 title: discovery.azure
 ---
@@ -26,30 +21,30 @@ discovery.azure "LABEL" {
 
 The following arguments are supported:
 
-Name | Type | Description | Default | Required
------------------------- | ---------- | ---------------------------------------------------------------------- | -------------------- | --------
-`environment` | `string` | Azure environment. | `"AzurePublicCloud"` | no
-`port` | `number` | Port to be appended to the `__address__` label for each target. | `80` | no
-`subscription_id` | `string` | Azure subscription ID. | | no
-`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `5m` | no
-`proxy_url` | `string` | HTTP proxy to send requests through.
| | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no - -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|----------------------|--------- +`environment` | `string` | Azure environment. | `"AzurePublicCloud"` | no +`port` | `number` | Port to be appended to the `__address__` label for each target. | `80` | no +`subscription_id` | `string` | Azure subscription ID. | | no +`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `5m` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no + +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.azure`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -oauth | [oauth][] | OAuth configuration for Azure API. | no -managed_identity | [managed_identity][] | Managed Identity configuration for Azure API. | no -tls_config | [tls_config][] | TLS configuration for requests to the Azure API. | no +Hierarchy | Block | Description | Required +-----------------|----------------------|--------------------------------------------------|--------- +oauth | [oauth][] | OAuth configuration for Azure API. | no +managed_identity | [managed_identity][] | Managed Identity configuration for Azure API. | no +tls_config | [tls_config][] | TLS configuration for requests to the Azure API. | no Exactly one of the `oauth` or `managed_identity` blocks must be specified. @@ -60,11 +55,11 @@ Exactly one of the `oauth` or `managed_identity` blocks must be specified. ### oauth block The `oauth` block configures OAuth authentication for the Azure API. -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`client_id` | `string` | OAuth client ID. | | yes -`client_secret` | `string` | OAuth client secret. | | yes -`tenant_id` | `string` | OAuth tenant ID. | | yes +Name | Type | Description | Default | Required +----------------|----------|----------------------|---------|--------- +`client_id` | `string` | OAuth client ID. | | yes +`client_secret` | `string` | OAuth client secret. | | yes +`tenant_id` | `string` | OAuth tenant ID. 
| | yes ### managed_identity block The `managed_identity` block configures Managed Identity authentication for the Azure API. @@ -75,7 +70,7 @@ Name | Type | Description | Default | Required ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/discovery.consul.md b/docs/sources/reference/components/discovery.consul.md similarity index 51% rename from docs/sources/flow/reference/components/discovery.consul.md rename to docs/sources/reference/components/discovery.consul.md index cf96dba94b..564a789556 100644 --- a/docs/sources/flow/reference/components/discovery.consul.md +++ b/docs/sources/reference/components/discovery.consul.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.consul/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.consul/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.consul/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.consul/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.consul/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.consul/ description: Learn about discovery.consul title: discovery.consul --- @@ -27,30 +22,30 @@ discovery.consul "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`server` | `string` | Host and port of the Consul API. | `localhost:8500` | no -`token` | `secret` | Secret token used to access the Consul API. | | no -`datacenter` | `string` | Datacenter to query. If not provided, the default is used. | | no -`namespace` | `string` | Namespace to use (only supported in Consul Enterprise). | | no -`partition` | `string` | Admin partition to use (only supported in Consul Enterprise). | | no -`tag_separator` | `string` | The string by which Consul tags are joined into the tag label. | `,` | no -`scheme` | `string` | The scheme to use when talking to Consul. | `http` | no -`username` | `string` | The username to use (deprecated in favor of the basic_auth configuration). | | no -`password` | `secret` | The password to use (deprecated in favor of the basic_auth configuration). | | no -`allow_stale` | `bool` | Allow stale Consul results (see [official documentation][consistency documentation]). Will reduce load on Consul. | `true` | no -`services` | `list(string)` | A list of services for which targets are retrieved. If omitted, all services are scraped. | | no -`tags` | `list(string)` | An optional list of tags used to filter nodes for a given service. Services must contain all tags in the list. | | no -`node_meta` | `map(string)` | Node metadata key/value pairs to filter nodes for a given service. | | no -`refresh_interval` | `duration` | Frequency to refresh list of containers. | `"30s"` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. 
| `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|-------------------------------------------------------------------------------------------------------------------|------------------|--------- +`server` | `string` | Host and port of the Consul API. | `localhost:8500` | no +`token` | `secret` | Secret token used to access the Consul API. | | no +`datacenter` | `string` | Datacenter to query. If not provided, the default is used. | | no +`namespace` | `string` | Namespace to use (only supported in Consul Enterprise). | | no +`partition` | `string` | Admin partition to use (only supported in Consul Enterprise). | | no +`tag_separator` | `string` | The string by which Consul tags are joined into the tag label. | `,` | no +`scheme` | `string` | The scheme to use when talking to Consul. | `http` | no +`username` | `string` | The username to use (deprecated in favor of the basic_auth configuration). | | no +`password` | `secret` | The password to use (deprecated in favor of the basic_auth configuration). | | no +`allow_stale` | `bool` | Allow stale Consul results (see [official documentation][consistency documentation]). Will reduce load on Consul. | `true` | no +`services` | `list(string)` | A list of services for which targets are retrieved. If omitted, all services are scraped. | | no +`tags` | `list(string)` | An optional list of tags used to filter nodes for a given service. Services must contain all tags in the list. | | no +`node_meta` | `map(string)` | Node metadata key/value pairs to filter nodes for a given service. | | no +`refresh_interval` | `duration` | Frequency to refresh list of containers. | `"30s"` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -59,7 +54,7 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
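+
+For example, a minimal sketch that uses the `bearer_token` option and feeds the discovered targets to a scrape component; the server address, service names, token value, and the downstream `prometheus.remote_write.default` component are illustrative:
+
+```river
+discovery.consul "example" {
+  server       = "localhost:8500"
+  services     = ["api", "billing"]
+  bearer_token = "CONSUL_API_TOKEN" // at most one of the authentication options above may be set
+}
+
+prometheus.scrape "consul_services" {
+  targets    = discovery.consul.example.targets
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+```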
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} [consistency documentation]: https://www.consul.io/api/features/consistency.html [arguments]: #arguments @@ -77,9 +72,8 @@ oauth2 | [oauth2][] | Configure OAuth2 for authenticating to oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -88,26 +82,26 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|----------------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Consul catalog API. Each target includes the following labels: @@ -128,9 +122,8 @@ Each target includes the following labels: ## Component health -`discovery.consul` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.consul` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.consulagent.md b/docs/sources/reference/components/discovery.consulagent.md similarity index 94% rename from docs/sources/flow/reference/components/discovery.consulagent.md rename to docs/sources/reference/components/discovery.consulagent.md index 340d1f6b5d..9cc23de10b 100644 --- a/docs/sources/flow/reference/components/discovery.consulagent.md +++ b/docs/sources/reference/components/discovery.consulagent.md @@ -1,8 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.consulagent/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.consulagent/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.consulagent/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.consulagent/ description: Learn about discovery.consulagent title: discovery.consulagent --- @@ -53,7 +50,7 @@ The following blocks are supported inside the definition of ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/discovery.digitalocean.md b/docs/sources/reference/components/discovery.digitalocean.md similarity index 76% rename from docs/sources/flow/reference/components/discovery.digitalocean.md rename to docs/sources/reference/components/discovery.digitalocean.md index faaa8e1ea8..c2bc5b9639 100644 --- a/docs/sources/flow/reference/components/discovery.digitalocean.md +++ b/docs/sources/reference/components/discovery.digitalocean.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.digitalocean/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.digitalocean/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.digitalocean/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.digitalocean/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.digitalocean/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.digitalocean/ description: Learn about discovery.digitalocean title: discovery.digitalocean --- @@ -29,18 +24,18 @@ discovery.digitalocean "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`port` | `number` | Port to be appended to the `__address__` label for each Droplet. | `80` | no -`refresh_interval` | `duration` | Frequency to refresh list of Droplets. | `"1m"` | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. 
| | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`port` | `number` | Port to be appended to the `__address__` label for each Droplet. | `80` | no +`refresh_interval` | `duration` | Frequency to refresh list of Droplets. | `"1m"` | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no The DigitalOcean API uses bearer tokens for authentication, see more about it in the [DigitalOcean API documentation](https://docs.digitalocean.com/reference/api/api-reference/#section/Authentication). @@ -48,7 +43,7 @@ Exactly one of the [`bearer_token`](#arguments) and [`bearer_token_file`](#argum [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The `discovery.digitalocean` component does not support any blocks, and is configured fully through arguments. @@ -59,7 +54,7 @@ The `discovery.digitalocean` component does not support any blocks, and is confi The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|--------------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the DigitalOcean API. 
Each target includes the following labels: diff --git a/docs/sources/flow/reference/components/discovery.dns.md b/docs/sources/reference/components/discovery.dns.md similarity index 70% rename from docs/sources/flow/reference/components/discovery.dns.md rename to docs/sources/reference/components/discovery.dns.md index a54890c240..73f1d8c8a8 100644 --- a/docs/sources/flow/reference/components/discovery.dns.md +++ b/docs/sources/reference/components/discovery.dns.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/latest/flow/reference/components/discovery.dns/ -- /docs/grafana-cloud/agent/flow/reference/components/discovery.dns/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.dns/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.dns/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.dns/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.dns/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.dns/ description: Learn about discovery.dns title: discovery.dns --- @@ -26,12 +20,12 @@ discovery.dns "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`names` | `list(string)` | DNS names to look up. | | yes -`port` | `number` | Port to use for collecting metrics. Not used for SRV records. | `0` | no -`refresh_interval` | `duration` | How often to query DNS for updates. | `"30s"` | no -`type` | `string` | Type of DNS record to query. Must be one of SRV, A, AAAA, or MX. | `"SRV"` | no +Name | Type | Description | Default | Required +-------------------|----------------|------------------------------------------------------------------|---------|--------- +`names` | `list(string)` | DNS names to look up. | | yes +`port` | `number` | Port to use for collecting metrics. Not used for SRV records. | `0` | no +`refresh_interval` | `duration` | How often to query DNS for updates. | `"30s"` | no +`type` | `string` | Type of DNS record to query. Must be one of SRV, A, AAAA, or MX. | `"SRV"` | no ## Exported fields @@ -51,9 +45,8 @@ Each target includes the following labels: ## Component health -`discovery.dns` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.dns` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
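+
+For example, a minimal sketch that wires SRV-record discovery into a scrape pipeline; the record name and the downstream `prometheus.remote_write.default` component are illustrative:
+
+```river
+discovery.dns "api_servers" {
+  names = ["_prometheus._tcp.example.com"] // SRV records carry their own target ports
+}
+
+prometheus.scrape "api" {
+  targets    = discovery.dns.api_servers.targets
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+```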
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.docker.md b/docs/sources/reference/components/discovery.docker.md similarity index 59% rename from docs/sources/flow/reference/components/discovery.docker.md rename to docs/sources/reference/components/discovery.docker.md index d9b5a02713..0ab823d22d 100644 --- a/docs/sources/flow/reference/components/discovery.docker.md +++ b/docs/sources/reference/components/discovery.docker.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.docker/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.docker/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.docker/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.docker/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.docker/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.docker/ description: Learn about discovery.docker title: discovery.docker --- @@ -27,20 +22,20 @@ discovery.docker "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`host` | `string` | Address of the Docker Daemon to connect to. | | yes -`port` | `number` | Port to use for collecting metrics when containers don't have any port mappings. | `80` | no -`host_networking_host` | `string` | Host to use if the container is in host networking mode. | `"localhost"` | no -`refresh_interval` | `duration` | Frequency to refresh list of containers. | `"1m"` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------------|--------- +`host` | `string` | Address of the Docker Daemon to connect to. | | yes +`port` | `number` | Port to use for collecting metrics when containers don't have any port mappings. | `80` | no +`host_networking_host` | `string` | Host to use if the container is in host networking mode. | `"localhost"` | no +`refresh_interval` | `duration` | Frequency to refresh list of containers. | `"1m"` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. 
| `true` | no
+`proxy_url`              | `string`            | HTTP proxy to send requests through.                                                              |               | no
+`no_proxy`               | `string`            | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying.  |               | no
+`proxy_from_environment` | `bool`              | Use the proxy URL indicated by environment variables.                                             | `false`       | no
+`proxy_connect_header`   | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests.                                     |               | no
 
 At most, one of the following can be provided:
 - [`bearer_token` argument](#arguments).
@@ -51,25 +46,24 @@ Name | Type | Description
 
 [arguments]: #arguments
 
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}
 
 ## Blocks
 
 The following blocks are supported inside the definition of `discovery.docker`:
 
-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-filter | [filter][] | Filters discoverable resources. | no
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+Hierarchy           | Block             | Description                                               | Required
+--------------------|-------------------|-----------------------------------------------------------|---------
+filter              | [filter][]        | Filters discoverable resources.                           | no
+basic_auth          | [basic_auth][]    | Configure basic_auth for authenticating to the endpoint.  | no
+authorization       | [authorization][] | Configure generic authorization to the endpoint.          | no
+oauth2              | [oauth2][]        | Configure OAuth2 for authenticating to the endpoint.      | no
+oauth2 > tls_config | [tls_config][]    | Configure TLS settings for connecting to the endpoint.    | no
+tls_config          | [tls_config][]    | Configure TLS settings for connecting to the endpoint.    | no
 
-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.
 
 [filter]: #filter-block
 [basic_auth]: #basic_auth-block
@@ -79,42 +73,40 @@ an `oauth2` block.
 
 ### filter block
 
-The `filter` block configures a filter to pass to the Docker Engine to limit
-the amount of containers returned. The `filter` block can be specified multiple
-times to provide more than one filter.
+The `filter` block configures a filter to pass to the Docker Engine to limit the number of containers returned.
+The `filter` block can be specified multiple times to provide more than one filter.
 
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`name` | `string` | Filter name to use. | | yes
-`values` | `list(string)` | Values to pass to the filter. | | yes
+Name     | Type           | Description                   | Default | Required
+---------|----------------|-------------------------------|---------|---------
+`name`   | `string`       | Filter name to use.           |         | yes
+`values` | `list(string)` | Values to pass to the filter.
| | yes -Refer to [List containers][List containers] from the Docker Engine API -documentation for the list of supported filters and their meaning. +Refer to [List containers][List containers] from the Docker Engine API documentation for the list of supported filters and their meaning. [List containers]: https://docs.docker.com/engine/api/v1.41/#tag/Container/operation/ContainerList ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|--------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the docker API. Each target includes the following labels: @@ -125,28 +117,21 @@ Each target includes the following labels: * `__meta_docker_container_label_`: Each label from the container. * `__meta_docker_network_id`: ID of the Docker network the container is in. * `__meta_docker_network_name`: Name of the Docker network the container is in. -* `__meta_docker_network_ingress`: Set to `true` if the Docker network is an - ingress network. -* `__meta_docker_network_internal`: Set to `true` if the Docker network is an - internal network. -* `__meta_docker_network_label_`: Each label from the network the - container is in. +* `__meta_docker_network_ingress`: Set to `true` if the Docker network is an ingress network. +* `__meta_docker_network_internal`: Set to `true` if the Docker network is an internal network. +* `__meta_docker_network_label_`: Each label from the network the container is in. * `__meta_docker_network_scope`: The scope of the network the container is in. * `__meta_docker_network_ip`: The IP of the container in the network. * `__meta_docker_port_private`: The private port on the container. -* `__meta_docker_port_public`: The publicly exposed port from the container, - if a port mapping exists. -* `__meta_docker_port_public_ip`: The public IP of the container, if a port - mapping exists. +* `__meta_docker_port_public`: The publicly exposed port from the container, if a port mapping exists. +* `__meta_docker_port_public_ip`: The public IP of the container, if a port mapping exists. -Each discovered container maps to one target per unique combination of networks -and port mappings used by the container. +Each discovered container maps to one target per unique combination of networks and port mappings used by the container. ## Component health -`discovery.docker` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. 
+`discovery.docker` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -160,8 +145,7 @@ values. ### Linux or macOS hosts -This example discovers Docker containers when the host machine is macOS or -Linux: +This example discovers Docker containers when the host machine is macOS or Linux: ```river discovery.docker "containers" { diff --git a/docs/sources/flow/reference/components/discovery.dockerswarm.md b/docs/sources/reference/components/discovery.dockerswarm.md similarity index 79% rename from docs/sources/flow/reference/components/discovery.dockerswarm.md rename to docs/sources/reference/components/discovery.dockerswarm.md index d02a044f5c..e2bc5d9c3e 100644 --- a/docs/sources/flow/reference/components/discovery.dockerswarm.md +++ b/docs/sources/reference/components/discovery.dockerswarm.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.dockerswarm/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.dockerswarm/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.dockerswarm/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.dockerswarm/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.dockerswarm/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.dockerswarm/ description: Learn about discovery.dockerswarm title: discovery.dockerswarm --- @@ -26,20 +21,20 @@ discovery.dockerswarm "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`host` | `string` | Address of the Docker daemon. | | yes -`role` | `string` | Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`. | | yes +Name | Type | Description | Default | Required +-------------------------|---------------------|-------------------------------------------------------------------------------------------------------------------------------|---------|--------- +`host` | `string` | Address of the Docker daemon. | | yes +`role` | `string` | Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`. | | yes `port` | `number` | The port to scrape metrics from, when `role` is nodes, and for discovered tasks and services that don't have published ports. | `80` | no -`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `"60s"` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +`refresh_interval` | `duration` | Interval at which to refresh the list of targets. 
| `"60s"` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -48,7 +43,7 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} [arguments]: #arguments @@ -95,19 +90,19 @@ The following arguments can be used to configure a filter. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -123,7 +118,9 @@ The `role` attribute decides the role of the targets to retrieve. ### services -The `services` role discovers all [Swarm services](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks) and exposes their ports as targets. For each published port of a service, a single target is generated. If a service has no published ports, a target per service is created using the `port` attribute defined in the arguments. +The `services` role discovers all [Swarm services](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks) and exposes their ports as targets. +For each published port of a service, a single target is generated. +If a service has no published ports, a target per service is created using the `port` attribute defined in the arguments. Available meta labels: @@ -145,7 +142,9 @@ Available meta labels: ### tasks -The `tasks` role discovers all [Swarm tasks](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks) and exposes their ports as targets. For each published port of a task, a single target is generated. If a task has no published ports, a target per task is created using the `port` attribute defined in the arguments. 
+The `tasks` role discovers all [Swarm tasks](https://docs.docker.com/engine/swarm/key-concepts/#services-and-tasks) and exposes their ports as targets. +For each published port of a task, a single target is generated. +If a task has no published ports, a target per task is created using the `port` attribute defined in the arguments. Available meta labels: @@ -201,9 +200,8 @@ Available meta labels: ## Component health -`discovery.dockerswarm` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.dockerswarm` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.ec2.md b/docs/sources/reference/components/discovery.ec2.md similarity index 62% rename from docs/sources/flow/reference/components/discovery.ec2.md rename to docs/sources/reference/components/discovery.ec2.md index 6345018f11..90ebb0109b 100644 --- a/docs/sources/flow/reference/components/discovery.ec2.md +++ b/docs/sources/reference/components/discovery.ec2.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.ec2/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.ec2/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.ec2/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.ec2/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.ec2/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.ec2/ description: Learn about discovery.ec2 title: discovery.ec2 --- @@ -26,24 +21,24 @@ discovery.ec2 "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`endpoint` | `string` | Custom endpoint to be used. | | no -`region` | `string` | The AWS region. If blank, the region from the instance metadata is used. | | no -`access_key` | `string` | The AWS API key ID. If blank, the environment variable `AWS_ACCESS_KEY_ID` is used. | | no -`secret_key` | `string` | The AWS API key secret. If blank, the environment variable `AWS_SECRET_ACCESS_KEY` is used. | | no -`profile` | `string` | Named AWS profile used to connect to the API. | | no -`role_arn` | `string` | AWS Role Amazon Resource Name (ARN), an alternative to using AWS API keys. | | no -`refresh_interval` | `string` | Refresh interval to re-read the instance list. | 60s | no -`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | 80 | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. 
| `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|-------------------------------------------------------------------------------------------------------------------------|---------|--------- +`endpoint` | `string` | Custom endpoint to be used. | | no +`region` | `string` | The AWS region. If blank, the region from the instance metadata is used. | | no +`access_key` | `string` | The AWS API key ID. If blank, the environment variable `AWS_ACCESS_KEY_ID` is used. | | no +`secret_key` | `string` | The AWS API key secret. If blank, the environment variable `AWS_SECRET_ACCESS_KEY` is used. | | no +`profile` | `string` | Named AWS profile used to connect to the API. | | no +`role_arn` | `string` | AWS Role Amazon Resource Name (ARN), an alternative to using AWS API keys. | | no +`refresh_interval` | `string` | Refresh interval to re-read the instance list. | 60s | no +`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | 80 | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -52,21 +47,21 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. - {{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} + {{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.ec2`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -filter | [filter][] | Filters discoverable resources. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +filter | [filter][] | Filters discoverable resources. 
| no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no [filter]: #filter-block [authorization]: #authorization-block @@ -75,17 +70,17 @@ tls_config | [tls_config][] | Configure TLS settings for connecting to the endpo ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### filter block Filters can be used optionally to filter the instance list by other criteria. Available filter criteria can be found in the [Amazon EC2 documentation](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html). -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`name` | `string` | Filter name to use. | | yes -`values` | `list(string)` | Values to pass to the filter. | | yes +Name | Type | Description | Default | Required +---------|----------------|-------------------------------|---------|--------- +`name` | `string` | Filter name to use. | | yes +`values` | `list(string)` | Values to pass to the filter. | | yes Refer to the [Filter API AWS EC2 documentation][filter api] for the list of supported filters and their descriptions. @@ -93,11 +88,11 @@ Refer to the [Filter API AWS EC2 documentation][filter api] for the list of supp ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -132,9 +127,8 @@ Each target includes the following labels: ## Component health -`discovery.ec2` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.ec2` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
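+
+For example, a minimal sketch that ties together the arguments and the `filter` block described above. The component label, region, and filter values are illustrative:
+
+```river
+discovery.ec2 "example" {
+  region = "us-east-1"
+
+  // Only discover instances that are currently running.
+  filter {
+    name   = "instance-state-name"
+    values = ["running"]
+  }
+}
+```
+
+Other components can then consume `discovery.ec2.example.targets`, for example as the `targets` argument of a `prometheus.scrape` component.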
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.eureka.md b/docs/sources/reference/components/discovery.eureka.md similarity index 61% rename from docs/sources/flow/reference/components/discovery.eureka.md rename to docs/sources/reference/components/discovery.eureka.md index 1cb3dd50da..dfcc6fea56 100644 --- a/docs/sources/flow/reference/components/discovery.eureka.md +++ b/docs/sources/reference/components/discovery.eureka.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.eureka/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.eureka/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.eureka/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.eureka/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.eureka/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.eureka/ description: Learn about discovery.eureka title: discovery.eureka --- @@ -27,18 +22,18 @@ discovery.eureka "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`server` | `string` | Eureka server URL. | | yes -`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `30s` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`server` | `string` | Eureka server URL. | | yes +`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `30s` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). 
@@ -49,19 +44,19 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.eureka`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside @@ -74,26 +69,26 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|--------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Eureka API. Each target includes the following labels: @@ -119,9 +114,8 @@ Each target includes the following labels: ## Component health -`discovery.eureka` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.eureka` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
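+
+For reference, a minimal sketch of a `discovery.eureka` configuration. The component label and server URL are placeholders for your own deployment:
+
+```river
+discovery.eureka "example" {
+  // Placeholder Eureka REST endpoint.
+  server           = "http://eureka.example.com:8761/eureka/v2"
+  refresh_interval = "30s"
+}
+```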
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.file.md b/docs/sources/reference/components/discovery.file.md similarity index 71% rename from docs/sources/flow/reference/components/discovery.file.md rename to docs/sources/reference/components/discovery.file.md index 67335bf5e1..58308e683f 100644 --- a/docs/sources/flow/reference/components/discovery.file.md +++ b/docs/sources/reference/components/discovery.file.md @@ -1,25 +1,18 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.file/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.file/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.file/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.file/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.file/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.file/ description: Learn about discovery.file title: discovery.file --- # discovery.file -> **NOTE:** In {{< param "PRODUCT_ROOT_NAME" >}} `v0.35.0`, the `discovery.file` component was renamed to [local.file_match][], -> and `discovery.file` was repurposed to discover scrape targets from one or more files. -> ->
-> -> If you are trying to discover files on the local filesystem rather than scrape -> targets within a set of files, you should use [local.file_match][] instead. +{{< admonition type="note" >}} +In {{< param "PRODUCT_ROOT_NAME" >}} `v0.35.0`, the `discovery.file` component was renamed to [local.file_match][], and `discovery.file` was repurposed to discover scrape targets from one or more files. + +If you are trying to discover files on the local filesystem rather than scrape targets within a set of files, you should use [local.file_match][] instead. -[local.file_match]: {{< relref "./local.file_match.md" >}} +[local.file_match]: ../local.file_match/ +{{< /admonition >}} `discovery.file` discovers targets from a set of files, similar to the [Prometheus file_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config). @@ -35,10 +28,10 @@ discovery.file "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------- | ------------------- | ------------------------------------------ |---------| -------- -`files` | `list(string)` | Files to read and discover targets from. | | yes -`refresh_interval` | `duration` | How often to sync targets. | "5m" | no +Name | Type | Description | Default | Required +-------------------|----------------|------------------------------------------|---------|--------- +`files` | `list(string)` | Files to read and discover targets from. | | yes +`refresh_interval` | `duration` | How often to sync targets. | "5m" | no The last path segment of each element in `files` may contain a single * that matches any character sequence, e.g. `my/path/tg_*.json`. @@ -47,7 +40,7 @@ The last path segment of each element in `files` may contain a single * that mat The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|--------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the filesystem. Each target includes the following labels: @@ -56,9 +49,8 @@ Each target includes the following labels: ## Component health -`discovery.file` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.file` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -102,8 +94,7 @@ values. ### Basic file discovery -This example discovers targets from a single file, scrapes them, and writes metrics -to a Prometheus remote write endpoint. +This example discovers targets from a single file, scrapes them, and writes metrics to a Prometheus remote write endpoint. ```river discovery.file "example" { @@ -134,8 +125,7 @@ Replace the following: ### File discovery with retained file path label -This example discovers targets from a wildcard file path, scrapes them, and writes metrics -to a Prometheus remote write endpoint. +This example discovers targets from a wildcard file path, scrapes them, and writes metrics to a Prometheus remote write endpoint. It also uses a relabeling rule to retain the file path as a label on each target. 
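+
+The full pipeline for this example follows in the source file. As a minimal, hedged sketch, the relabeling rule it refers to could copy the `__meta_filepath` label exported by `discovery.file` into a permanent label. The component labels and the `file_path` label name are illustrative:
+
+```river
+discovery.relabel "retain_path" {
+  targets = discovery.file.example.targets
+
+  rule {
+    // Copy the discovered file path into a label that survives relabeling.
+    source_labels = ["__meta_filepath"]
+    target_label  = "file_path"
+  }
+}
+```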
diff --git a/docs/sources/flow/reference/components/discovery.gce.md b/docs/sources/reference/components/discovery.gce.md similarity index 57% rename from docs/sources/flow/reference/components/discovery.gce.md rename to docs/sources/reference/components/discovery.gce.md index 182a19dfac..d47300d1f9 100644 --- a/docs/sources/flow/reference/components/discovery.gce.md +++ b/docs/sources/reference/components/discovery.gce.md @@ -1,26 +1,24 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.gce/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.gce/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.gce/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.gce/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.gce/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.gce/ description: Learn about discovery.gce title: discovery.gce --- # discovery.gce -`discovery.gce` allows retrieving scrape targets from [Google Compute Engine](https://cloud.google.com/compute) (GCE) instances. The private IP address is used by default, but may be changed to the public IP address with relabeling. +`discovery.gce` allows retrieving scrape targets from [Google Compute Engine][] (GCE) instances. +The private IP address is used by default, but may be changed to the public IP address with relabeling. Credentials are discovered by the Google Cloud SDK default client by looking in the following places, preferring the first location found: -1. a JSON file specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable. -2. a JSON file in the well-known path `$HOME/.config/gcloud/application_default_credentials.json`. -3. fetched from the GCE metadata server. +1. A JSON file specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable. +2. A JSON file in the well-known path `$HOME/.config/gcloud/application_default_credentials.json`. +3. Fetched from the GCE metadata server. -If the Agent is running within GCE, the service account associated with the instance it is running on should have at least read-only permissions to the compute resources. If running outside of GCE make sure to create an appropriate service account and place the credential file in one of the expected locations. +If {{< param "PRODUCT_NAME" >}} is running within GCE, the service account associated with the instance it's running on should have at least read-only permissions to the compute resources. +If running outside of GCE make sure to create an appropriate service account and place the credential file in one of the expected locations. +[Google Compute Engine]: https://cloud.google.com/compute ## Usage @@ -35,23 +33,25 @@ discovery.gce "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`project` | `string` | The GCP Project.| | yes -`zone` | `string` | The zone of the scrape targets. | | yes -`filter` | `string` | Filter can be used optionally to filter the instance list by other criteria. | | no -`refresh_interval` | `duration` | Refresh interval to re-read the instance list. | `"60s"`| no -`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | `80`| no -`tag_separator` | `string` | The tag separator is used to separate the tags on concatenation. 
| `","`| no +Name | Type | Description | Default | Required +-------------------|------------|-------------------------------------------------------------------------------------------------------------------------|---------|--------- +`project` | `string` | The GCP Project. | | yes +`zone` | `string` | The zone of the scrape targets. | | yes +`filter` | `string` | Filter can be used optionally to filter the instance list by other criteria. | | no +`refresh_interval` | `duration` | Refresh interval to re-read the instance list. | `"60s"` | no +`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | `80` | no +`tag_separator` | `string` | The tag separator is used to separate the tags on concatenation. | `","` | no -For more information on the syntax of the `filter` argument, refer to Google's `filter` documentation for [Method: instances.list](https://cloud.google.com/compute/docs/reference/latest/instances/list). +For more information on the syntax of the `filter` argument, refer to Google's `filter` documentation for [Method: instances.list][]. + +[Method: instances.list]: https://cloud.google.com/compute/docs/reference/latest/instances/list ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|----------------------------------- `targets` | `list(map(string))` | The set of discovered GCE targets. Each target includes the following labels: @@ -73,9 +73,8 @@ Each target includes the following labels: ## Component health -`discovery.gce` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.gce` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -113,7 +112,7 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
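+
+As a minimal sketch, the `filter` argument described in the arguments table can narrow discovery before any relabeling happens. The project, zone, and filter expression below are illustrative:
+
+```river
+discovery.gce "filtered" {
+  project = "my-gcp-project"
+  zone    = "us-east1-b"
+
+  // Only discover instances that are currently running.
+  filter = "status = RUNNING"
+}
+```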
- + ## Compatible components diff --git a/docs/sources/flow/reference/components/discovery.hetzner.md b/docs/sources/reference/components/discovery.hetzner.md similarity index 65% rename from docs/sources/flow/reference/components/discovery.hetzner.md rename to docs/sources/reference/components/discovery.hetzner.md index a18984696d..27637a983c 100644 --- a/docs/sources/flow/reference/components/discovery.hetzner.md +++ b/docs/sources/reference/components/discovery.hetzner.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.hetzner/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.hetzner/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.hetzner/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.hetzner/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.hetzner/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.hetzner/ description: Learn about discovery.hetzner title: discovery.hetzner --- @@ -29,49 +24,48 @@ discovery.hetzner "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`role` | `string` | Hetzner role of entities that should be discovered. | | yes -`port` | `int` | The port to scrape metrics from. | `80` | no -`refresh_interval` | `duration` | The time after which the servers are refreshed. | `"60s"` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`role` | `string` | Hetzner role of entities that should be discovered. | | yes +`port` | `int` | The port to scrape metrics from. | `80` | no +`refresh_interval` | `duration` | The time after which the servers are refreshed. | `"60s"` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. 
| `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no `role` must be one of `robot` or `hcloud`. At most, one of the following can be provided: - [`bearer_token` argument](#arguments). - - [`bearer_token_file` argument](#arguments). + - [`bearer_token_file` argument](#arguments). - [`basic_auth` block][basic_auth]. - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.hetzner`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -80,26 +74,26 @@ an `oauth2` block. 
### basic_auth block
 
-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}
 
 ### authorization block
 
-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}
 
 ### oauth2 block
 
-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}
 
 ### tls_config block
 
-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}
 
 ## Exported fields
 
 The following fields are exported and can be referenced by other components:
 
-Name | Type | Description
---- | ---- | -----------
+Name      | Type                | Description
+----------|---------------------|------------------------------------------------------------
 `targets` | `list(map(string))` | The set of targets discovered from the Hetzner catalog API.
 
 Each target includes the following labels:

diff --git a/docs/sources/flow/reference/components/discovery.http.md b/docs/sources/reference/components/discovery.http.md
similarity index 54%
rename from docs/sources/flow/reference/components/discovery.http.md
rename to docs/sources/reference/components/discovery.http.md
index 1ad2734eaf..11e723f6e4 100644
--- a/docs/sources/flow/reference/components/discovery.http.md
+++ b/docs/sources/reference/components/discovery.http.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.http/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.http/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.http/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.http/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.http/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.http/
 description: Learn about discovery.http
 title: discovery.http
 ---
@@ -13,7 +8,9 @@ title: discovery.http
 
-`discovery.http` provides a flexible way to define targets by querying an external http endpoint.
+`discovery.http` provides a flexible way to define targets by querying an external HTTP endpoint.
 
-It fetches targets from an HTTP endpoint containing a list of zero or more target definitions. The target must reply with an HTTP 200 response. The HTTP header Content-Type must be application/json, and the body must be valid JSON.
+It fetches targets from an HTTP endpoint containing a list of zero or more target definitions.
+The target must reply with an HTTP 200 response.
+The HTTP header Content-Type must be `application/json`, and the body must be valid JSON.
 
 Example response body:
 
@@ -31,7 +28,7 @@ Example response body:
 
 It is possible to use additional fields in the JSON to pass parameters to [prometheus.scrape][] such as the `metricsPath` and `scrape_interval`.
-[prometheus.scrape]: {{< relref "./prometheus.scrape.md#technical-details" >}} +[prometheus.scrape]: ../prometheus.scrape/#technical-details As an example, the following will provide a target with a custom `metricsPath`, scrape interval, and timeout value: @@ -53,7 +50,7 @@ As an example, the following will provide a target with a custom `metricsPath`, ``` -It is also possible to append query parameters to the metrics path with the `__param_` syntax. +It's also possible to append query parameters to the metrics path with the `__param_` syntax. For example, the following will call a metrics path of `/health?target_data=prometheus`: @@ -76,7 +73,9 @@ For example, the following will call a metrics path of `/health?target_data=prom ``` -For more information on the potential labels you can use, see the [prometheus.scrape technical details][prometheus.scrape] section, or the [Prometheus Configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) documentation. +For more information on the potential labels you can use, see the [prometheus.scrape technical details][prometheus.scrape] section, or the [Prometheus Configuration][] documentation. + +[Prometheus Configuration]: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config ## Usage @@ -90,18 +89,18 @@ discovery.http "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`url` | `string` | URL to scrape. | | yes -`refresh_interval` | `duration` | How often to refresh targets. | `"60s"` | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`url` | `string` | URL to scrape. | | yes +`refresh_interval` | `duration` | How often to refresh targets. | `"60s"` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. 
| `false` | no
+`proxy_connect_header`   | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests.                                      |         | no
 
 At most, one of the following can be provided:
   - [`bearer_token` argument](#arguments).
@@ -112,24 +111,23 @@ Name | Type | Description
 
 [arguments]: #arguments
 
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}
 
 ## Blocks
 
 The following blocks are supported inside the definition of
 `discovery.http`:
 
-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+Hierarchy           | Block             | Description                                               | Required
+--------------------|-------------------|----------------------------------------------------------|---------
+basic_auth          | [basic_auth][]    | Configure basic_auth for authenticating to the endpoint. | no
+authorization       | [authorization][] | Configure generic authorization to the endpoint.         | no
+oauth2              | [oauth2][]        | Configure OAuth2 for authenticating to the endpoint.     | no
+oauth2 > tls_config | [tls_config][]    | Configure TLS settings for connecting to the endpoint.   | no
+tls_config          | [tls_config][]    | Configure TLS settings for connecting to the endpoint.   | no
 
-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.
 
 [basic_auth]: #basic_auth-block
 [authorization]: #authorization-block
@@ -138,26 +136,26 @@ an `oauth2` block.
 
 ### basic_auth block
 
-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}
 
 ### authorization block
 
-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}
 
 ### oauth2 block
 
-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}
 
 ### tls_config block
 
-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}
 
 ## Exported fields
 
 The following fields are exported and can be referenced by other components:
 
-Name | Type | Description
---- | ---- | -----------
-`targets` | `list(map(string))` | The set of targets discovered from the filesystem.
+Name      | Type                | Description
+----------|---------------------|------------------------------------------------------
+`targets` | `list(map(string))` | The set of targets discovered from the HTTP endpoint.
Each target includes the following labels: @@ -166,9 +164,8 @@ Each target includes the following labels: ## Component health -`discovery.http` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.http` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -180,7 +177,7 @@ values. ## Examples -This example will query a url every 15 seconds and expose targets that it finds: +This example will query a URL every 15 seconds and expose targets that it finds: ```river discovery.http "dynamic_targets" { diff --git a/docs/sources/flow/reference/components/discovery.ionos.md b/docs/sources/reference/components/discovery.ionos.md similarity index 73% rename from docs/sources/flow/reference/components/discovery.ionos.md rename to docs/sources/reference/components/discovery.ionos.md index 9bdaa6bc4d..4e4ee5e555 100644 --- a/docs/sources/flow/reference/components/discovery.ionos.md +++ b/docs/sources/reference/components/discovery.ionos.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.ionos/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.ionos/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.ionos/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.ionos/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.ionos/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.ionos/ description: Learn about discovery.ionos title: discovery.ionos --- @@ -27,19 +22,19 @@ discovery.ionos "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`datacenter_id` | `string` | The unique ID of the data center. | | yes -`refresh_interval` | `duration` | The time after which the servers are refreshed. | `60s` | no -`port` | `int` | The port to scrape metrics from. | 80 | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`datacenter_id` | `string` | The unique ID of the data center. | | yes +`refresh_interval` | `duration` | The time after which the servers are refreshed. | `60s` | no +`port` | `int` | The port to scrape metrics from. 
| 80 | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -50,7 +45,7 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks @@ -76,19 +71,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -116,9 +111,8 @@ Each target includes the following labels: ## Component health -`discovery.ionos` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.ionos` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
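+
+For example, a minimal sketch of a `discovery.ionos` configuration. The data center UUID and port are placeholders, and authentication, for example a `basic_auth` block, is configured separately:
+
+```river
+discovery.ionos "example" {
+  // Placeholder data center UUID.
+  datacenter_id = "11111111-2222-3333-4444-555555555555"
+  port          = 9100
+}
+```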
## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.kubelet.md b/docs/sources/reference/components/discovery.kubelet.md
similarity index 60%
rename from docs/sources/flow/reference/components/discovery.kubelet.md
rename to docs/sources/reference/components/discovery.kubelet.md
index f9fef4a856..0ed182e6d7 100644
--- a/docs/sources/flow/reference/components/discovery.kubelet.md
+++ b/docs/sources/reference/components/discovery.kubelet.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.kubelet/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.kubelet/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.kubelet/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kubelet/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kubelet/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.kubelet/
 description: Learn about discovery.kubelet
 labels:
   stage: beta
@@ -13,8 +8,7 @@ title: discovery.kubelet
 
 # discovery.kubelet
 
-`discovery.kubelet` discovers Kubernetes pods running on the specified Kubelet
-and exposes them as scrape targets.
+`discovery.kubelet` discovers Kubernetes pods running on the specified Kubelet and exposes them as scrape targets.
 
 ## Usage
 
@@ -26,29 +20,30 @@ discovery.kubelet "LABEL" {
 
 ## Requirements
 
-* The Kubelet must be reachable from the `grafana-agent` pod network.
-* Follow the [Kubelet authorization](https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authorization)
-  documentation to configure authentication to the Kubelet API.
+* The Kubelet must be reachable from the {{< param "PRODUCT_NAME" >}} pod network.
+* Follow the [Kubelet authorization][] documentation to configure authentication to the Kubelet API.
+
+[Kubelet authorization]: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authorization
 
 ## Arguments
 
 The following arguments are supported:
 
-Name | Type | Description | Default | Required
------------------------- | ------------------- | ------------------------------------------------------------- | ------- | --------
-`url` | `string` | URL of the Kubelet server. | "https://localhost:10250" | no
-`refresh_interval` | `duration` | How often the Kubelet should be polled for scrape targets | `5s` | no
-`namespaces` | `list(string)` | A list of namespaces to extract target pods from | | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
-
-The `namespaces` list limits the namespaces to discover resources in. If
-omitted, all namespaces are searched.
+Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------------------------|--------- +`url` | `string` | URL of the Kubelet server. | "https://localhost:10250" | no +`refresh_interval` | `duration` | How often the Kubelet should be polled for scrape targets | `5s` | no +`namespaces` | `list(string)` | A list of namespaces to extract target pods from | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no + +The `namespaces` list limits the namespaces to discover resources in. +If omitted, all namespaces are searched. `discovery.kubelet` appends a `/pods` path to `url` to request the available pods. You can have additional paths in the `url`. @@ -63,24 +58,23 @@ For example, if `url` is `https://kubernetes.default.svc.cluster.local:443/api/v [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.kubelet`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -89,26 +83,26 @@ an `oauth2` block. 
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|---------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Kubelet API. Each target includes the following labels: @@ -118,43 +112,32 @@ Each target includes the following labels: * `__meta_kubernetes_pod_name`: The name of the pod object. * `__meta_kubernetes_pod_ip`: The pod IP of the pod object. * `__meta_kubernetes_pod_label_`: Each label from the pod object. -* `__meta_kubernetes_pod_labelpresent_`: `true` for each label from - the pod object. -* `__meta_kubernetes_pod_annotation_`: Each annotation from the - pod object. -* `__meta_kubernetes_pod_annotationpresent_`: `true` for each - annotation from the pod object. -* `__meta_kubernetes_pod_container_init`: `true` if the container is an - `InitContainer`. -* `__meta_kubernetes_pod_container_name`: Name of the container the target - address points to. -* `__meta_kubernetes_pod_container_id`: ID of the container the target address - points to. The ID is in the form `://`. +* `__meta_kubernetes_pod_labelpresent_`: `true` for each label from the pod object. +* `__meta_kubernetes_pod_annotation_`: Each annotation from the pod object. +* `__meta_kubernetes_pod_annotationpresent_`: `true` for each annotation from the pod object. +* `__meta_kubernetes_pod_container_init`: `true` if the container is an `InitContainer`. +* `__meta_kubernetes_pod_container_name`: Name of the container the target address points to. +* `__meta_kubernetes_pod_container_id`: ID of the container the target address points to. The ID is in the form `://`. * `__meta_kubernetes_pod_container_image`: The image the container is using. * `__meta_kubernetes_pod_container_port_name`: Name of the container port. * `__meta_kubernetes_pod_container_port_number`: Number of the container port. -* `__meta_kubernetes_pod_container_port_protocol`: Protocol of the container - port. -* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the pod's ready - state. -* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or - `Unknown` in the lifecycle. -* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled - onto. +* `__meta_kubernetes_pod_container_port_protocol`: Protocol of the container port. +* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the pod's ready state. +* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or `Unknown` in the lifecycle. 
+* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled onto.
* `__meta_kubernetes_pod_host_ip`: The current host IP of the pod object.
* `__meta_kubernetes_pod_uid`: The UID of the pod object.
* `__meta_kubernetes_pod_controller_kind`: Object kind of the pod controller.
* `__meta_kubernetes_pod_controller_name`: Name of the pod controller.

-> **Note**: The Kubelet API used by this component is an internal API and therefore the
-> data in the response returned from the API cannot be guaranteed between different versions
-> of the Kubelet.
+{{< admonition type="note" >}}
+The Kubelet API used by this component is an internal API, and therefore the data in the response returned from the API can't be guaranteed to be consistent between different versions of the Kubelet.
+{{< /admonition >}}

## Component health

-`discovery.kubelet` is reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.kubelet` is reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.kubernetes.md b/docs/sources/reference/components/discovery.kubernetes.md
similarity index 61%
rename from docs/sources/flow/reference/components/discovery.kubernetes.md
rename to docs/sources/reference/components/discovery.kubernetes.md
index 95d1d69a97..f9136d2ee1 100644
--- a/docs/sources/flow/reference/components/discovery.kubernetes.md
+++ b/docs/sources/reference/components/discovery.kubernetes.md
@@ -1,23 +1,16 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.kubernetes/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.kubernetes/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.kubernetes/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kubernetes/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kubernetes/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.kubernetes/
description: Learn about discovery.kubernetes
title: discovery.kubernetes
---

# discovery.kubernetes

-`discovery.kubernetes` allows you to find scrape targets from Kubernetes
-resources. It watches cluster state, and ensures targets are continually synced
-with what is currently running in your cluster.
+`discovery.kubernetes` allows you to find scrape targets from Kubernetes resources.
+It watches cluster state and ensures targets are continually synced with what is currently running in your cluster.

-If you supply no connection information, this component defaults to an
-in-cluster configuration. A kubeconfig file or manual connection settings can be used
-to override the defaults.
+If you supply no connection information, this component defaults to an in-cluster configuration.
+A kubeconfig file or manual connection settings can be used to override the defaults.

## Usage

@@ -31,19 +24,19 @@ discovery.kubernetes "LABEL" {

The following arguments are supported:

-Name | Type | Description | Default | Required
------------------------- | ------------------- | ------------------------------------------------------------- | ------- | --------
-`api_server` | `string` | URL of Kubernetes API server. | | no
-`role` | `string` | Type of Kubernetes resource to query. | | yes
-`kubeconfig_file` | `string` | Path of kubeconfig file to use for connecting to Kubernetes. | | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+Name | Type | Description | Default | Required
+-------------------------|---------------------|----------------------------------------------------------------------------------------------------|---------|---------
+`api_server` | `string` | URL of Kubernetes API server. | | no
+`role` | `string` | Type of Kubernetes resource to query. | | yes
+`kubeconfig_file` | `string` | Path of kubeconfig file to use for connecting to Kubernetes. | | no
+`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
+`bearer_token` | `secret` | Bearer token to authenticate with. | | no
+`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
+`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
+`proxy_url` | `string` | HTTP proxy to send requests through. | | no
+`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
+`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
+`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no

At most, one of the following can be provided:
- [`bearer_token` argument](#arguments).
- [`bearer_token_file` argument](#arguments).
- [`basic_auth` block][basic_auth].
- [`authorization` block][authorization].
- [`oauth2` block][oauth2].

@@ -54,19 +47,14 @@ Name | Type | Description

[arguments]: #arguments

-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

-The `role` argument is required to specify what type of targets to discover.
-`role` must be one of `node`, `pod`, `service`, `endpoints`, `endpointslice`,
-or `ingress`.
+The `role` argument is required to specify what type of targets to discover. `role` must be one of `node`, `pod`, `service`, `endpoints`, `endpointslice`, or `ingress`.

### node role

-The `node` role discovers one target per cluster node with the address
-defaulting to the HTTP port of the Kubelet daemon. The target address defaults
-to the first existing address of the Kubernetes node object in the address type
-order of `NodeInternalIP`, `NodeExternalIP`, `NodeLegacyHostIP`, and
-`NodeHostName`.
+The `node` role discovers one target per cluster node with the address defaulting to the HTTP port of the Kubelet daemon.
+The target address defaults to the first existing address of the Kubernetes node object in the address type order of `NodeInternalIP`, `NodeExternalIP`, `NodeLegacyHostIP`, and `NodeHostName`.
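For illustration, a minimal sketch of the `node` role in use follows. This is an editorial example, not part of the original file; the component labels and the remote write URL are placeholders:

```river
// Discover one target per cluster node. With no connection information
// supplied, in-cluster defaults are used.
discovery.kubernetes "nodes" {
  role = "node"
}

// Scrape the discovered node targets and forward the metrics.
prometheus.scrape "nodes" {
  targets    = discovery.kubernetes.nodes.targets
  forward_to = [prometheus.remote_write.default.receiver]
}

// Placeholder destination for the scraped metrics.
prometheus.remote_write "default" {
  endpoint {
    url = "https://prometheus.example.com/api/v1/write"
  }
}
```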
The following labels are included for discovered nodes:

@@ -75,53 +63,39 @@ The following labels are included for discovered nodes:
* `__meta_kubernetes_node_label_<labelname>`: Each label from the node object.
* `__meta_kubernetes_node_labelpresent_<labelname>`: Set to `true` for each label from the node object.
* `__meta_kubernetes_node_annotation_<annotationname>`: Each annotation from the node object.
-* `__meta_kubernetes_node_annotationpresent_<annotationname>`: Set to `true`
- for each annotation from the node object.
-* `__meta_kubernetes_node_address_<address_type>`: The first address for each
- node address type, if it exists.
+* `__meta_kubernetes_node_annotationpresent_<annotationname>`: Set to `true` for each annotation from the node object.
+* `__meta_kubernetes_node_address_<address_type>`: The first address for each node address type, if it exists.

-In addition, the `instance` label for the node will be set to the node name as
-retrieved from the API server.
+In addition, the `instance` label for the node will be set to the node name as retrieved from the API server.

### service role

The `service` role discovers a target for each service port for each service.
-This is generally useful for externally monitoring a service. The address will
-be set to the Kubernetes DNS name of the service and respective service port.
+This is generally useful for externally monitoring a service.
+The address will be set to the Kubernetes DNS name of the service and respective service port.

The following labels are included for discovered services:

* `__meta_kubernetes_namespace`: The namespace of the service object.
-* `__meta_kubernetes_service_annotation_<annotationname>`: Each annotation from
- the service object.
-* `__meta_kubernetes_service_annotationpresent_<annotationname>`: `true` for
- each annotation of the service object.
-* `__meta_kubernetes_service_cluster_ip`: The cluster IP address of the
- service. This does not apply to services of type `ExternalName`.
-* `__meta_kubernetes_service_external_name`: The DNS name of the service.
- This only applies to services of type `ExternalName`.
-* `__meta_kubernetes_service_label_<labelname>`: Each label from the service
- object.
-* `__meta_kubernetes_service_labelpresent_<labelname>`: `true` for each label
- of the service object.
+* `__meta_kubernetes_service_annotation_<annotationname>`: Each annotation from the service object.
+* `__meta_kubernetes_service_annotationpresent_<annotationname>`: `true` for each annotation of the service object.
+* `__meta_kubernetes_service_cluster_ip`: The cluster IP address of the service. This does not apply to services of type `ExternalName`.
+* `__meta_kubernetes_service_external_name`: The DNS name of the service. This only applies to services of type `ExternalName`.
+* `__meta_kubernetes_service_label_<labelname>`: Each label from the service object.
+* `__meta_kubernetes_service_labelpresent_<labelname>`: `true` for each label of the service object.
* `__meta_kubernetes_service_name`: The name of the service object.
-* `__meta_kubernetes_service_port_name`: Name of the service port for the
- target.
-* `__meta_kubernetes_service_port_number`: Number of the service port for the
- target.
-* `__meta_kubernetes_service_port_protocol`: Protocol of the service port for
- the target.
+* `__meta_kubernetes_service_port_name`: Name of the service port for the target.
+* `__meta_kubernetes_service_port_number`: Number of the service port for the target.
+* `__meta_kubernetes_service_port_protocol`: Protocol of the service port for the target.
* `__meta_kubernetes_service_type`: The type of the service.

### pod role

-The `pod` role discovers all pods and exposes their containers as targets. For
-each declared port of a container, a single target is generated.
+The `pod` role discovers all pods and exposes their containers as targets.
+For each declared port of a container, a single target is generated.

-If a container has no specified ports, a port-free target per container is
-created. These targets must have a port manually injected using a
-[`discovery.relabel` component][discovery.relabel] before metrics can be
-collected from them.
+If a container has no specified ports, a port-free target per container is created.
+These targets must have a port manually injected using a [`discovery.relabel` component][discovery.relabel] before metrics can be collected from them.

The following labels are included for discovered pods:

@@ -129,29 +103,19 @@ The following labels are included for discovered pods:
* `__meta_kubernetes_pod_name`: The name of the pod object.
* `__meta_kubernetes_pod_ip`: The pod IP of the pod object.
* `__meta_kubernetes_pod_label_<labelname>`: Each label from the pod object.
-* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from
- the pod object.
-* `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the
- pod object.
-* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each
- annotation from the pod object.
-* `__meta_kubernetes_pod_container_init`: `true` if the container is an
- `InitContainer`.
-* `__meta_kubernetes_pod_container_name`: Name of the container the target
- address points to.
-* `__meta_kubernetes_pod_container_id`: ID of the container the target address
- points to. The ID is in the form `<type>://<container_id>`.
+* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from the pod object.
+* `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the pod object.
+* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each annotation from the pod object.
+* `__meta_kubernetes_pod_container_init`: `true` if the container is an `InitContainer`.
+* `__meta_kubernetes_pod_container_name`: Name of the container the target address points to.
+* `__meta_kubernetes_pod_container_id`: ID of the container the target address points to. The ID is in the form `<type>://<container_id>`.
* `__meta_kubernetes_pod_container_image`: The image the container is using.
* `__meta_kubernetes_pod_container_port_name`: Name of the container port.
* `__meta_kubernetes_pod_container_port_number`: Number of the container port.
-* `__meta_kubernetes_pod_container_port_protocol`: Protocol of the container
- port.
-* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the pod's ready
- state.
-* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or
- `Unknown` in the lifecycle.
-* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled
- onto.
+* `__meta_kubernetes_pod_container_port_protocol`: Protocol of the container port.
+* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the pod's ready state.
+* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or `Unknown` in the lifecycle.
+* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled onto.
* `__meta_kubernetes_pod_host_ip`: The current host IP of the pod object.
* `__meta_kubernetes_pod_uid`: The UID of the pod object.
* `__meta_kubernetes_pod_controller_kind`: Object kind of the pod controller.
* `__meta_kubernetes_pod_controller_name`: Name of the pod controller.

@@ -168,86 +132,58 @@ The following labels are included for discovered endpoints:
* `__meta_kubernetes_namespace`: The namespace of the endpoints object.
* `__meta_kubernetes_endpoints_name`: The names of the endpoints object.
-* `__meta_kubernetes_endpoints_label_<labelname>`: Each label from the
- endpoints object.
-* `__meta_kubernetes_endpoints_labelpresent_<labelname>`: `true` for each label
- from the endpoints object.
-* The following labels are attached for all targets discovered directly from
- the endpoints list:
+* `__meta_kubernetes_endpoints_label_<labelname>`: Each label from the endpoints object.
+* `__meta_kubernetes_endpoints_labelpresent_<labelname>`: `true` for each label from the endpoints object.
+* The following labels are attached for all targets discovered directly from the endpoints list:
  * `__meta_kubernetes_endpoint_hostname`: Hostname of the endpoint.
-  * `__meta_kubernetes_endpoint_node_name`: Name of the node hosting the
- endpoint.
-  * `__meta_kubernetes_endpoint_ready`: Set to `true` or `false` for the
- endpoint's ready state.
+  * `__meta_kubernetes_endpoint_node_name`: Name of the node hosting the endpoint.
+  * `__meta_kubernetes_endpoint_ready`: Set to `true` or `false` for the endpoint's ready state.
  * `__meta_kubernetes_endpoint_port_name`: Name of the endpoint port.
  * `__meta_kubernetes_endpoint_port_protocol`: Protocol of the endpoint port.
-  * `__meta_kubernetes_endpoint_address_target_kind`: Kind of the endpoint
- address target.
-  * `__meta_kubernetes_endpoint_address_target_name`: Name of the endpoint
- address target.
-* If the endpoints belong to a service, all labels of the `service` role
- discovery are attached.
-* For all targets backed by a pod, all labels of the `pod` role discovery are
- attached.
+  * `__meta_kubernetes_endpoint_address_target_kind`: Kind of the endpoint address target.
+  * `__meta_kubernetes_endpoint_address_target_name`: Name of the endpoint address target.
+* If the endpoints belong to a service, all labels of the `service` role discovery are attached.
+* For all targets backed by a pod, all labels of the `pod` role discovery are attached.

### endpointslice role

-The endpointslice role discovers targets from existing Kubernetes endpoint
-slices. For each endpoint address referenced in the `EndpointSlice` object, one
-target is discovered. If the endpoint is backed by a pod, all container ports
-of a pod are discovered as targets even if they are not bound to an endpoint
-port.
+The endpointslice role discovers targets from existing Kubernetes endpoint slices.
+For each endpoint address referenced in the `EndpointSlice` object, one target is discovered.
+If the endpoint is backed by a Pod, all container ports of the Pod are discovered as targets even if they are not bound to an endpoint port.

The following labels are included for discovered endpoint slices:

* `__meta_kubernetes_namespace`: The namespace of the endpoints object.
* `__meta_kubernetes_endpointslice_name`: The name of the endpoint slice object.
-* The following labels are attached for all targets discovered directly from
- the endpoint slice list:
-  * `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the
- referenced object.
-  * `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced
- object.
-  * `__meta_kubernetes_endpointslice_address_type`: The IP protocol family of
- the address of the target.
-  * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true`
- or `false` for the referenced endpoint's ready state.
-  * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`:
- Name of the node hosting the referenced endpoint.
-  * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`:
- `true` if the referenced object has a `kubernetes.io/hostname` annotation.
+* The following labels are attached for all targets discovered directly from the endpoint slice list:
+  * `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the referenced object.
+  * `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced object.
+  * `__meta_kubernetes_endpointslice_address_type`: The IP protocol family of the address of the target.
+  * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true` or `false` for the referenced endpoint's ready state.
+  * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`: Name of the node hosting the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: `true` if the referenced object has a `kubernetes.io/hostname` annotation.
  * `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint.
-  * `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced
- endpoint.
-  * `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced
- endpoint.
-* If the endpoints belong to a service, all labels of the `service` role
- discovery are attached.
-* For all targets backed by a pod, all labels of the `pod` role discovery are
- attached.
+  * `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint.
+* If the endpoints belong to a service, all labels of the `service` role discovery are attached.
+* For all targets backed by a pod, all labels of the `pod` role discovery are attached.

### ingress role

-The `ingress` role discovers a target for each path of each ingress. This is
-generally useful for externally monitoring an ingress. The address will be set
-to the host specified in the Kubernetes `Ingress`'s `spec` block.
+The `ingress` role discovers a target for each path of each ingress.
+This is generally useful for externally monitoring an ingress.
+The address will be set to the host specified in the Kubernetes `Ingress`'s `spec` block.

The following labels are included for discovered ingress objects:

* `__meta_kubernetes_namespace`: The namespace of the ingress object.
* `__meta_kubernetes_ingress_name`: The name of the ingress object.
-* `__meta_kubernetes_ingress_label_<labelname>`: Each label from the ingress
- object.
-* `__meta_kubernetes_ingress_labelpresent_<labelname>`: `true` for each label
- from the ingress object.
-* `__meta_kubernetes_ingress_annotation_<annotationname>`: Each annotation from
- the ingress object.
-* `__meta_kubernetes_ingress_annotationpresent_<annotationname>`: `true` for each
- annotation from the ingress object.
-* `__meta_kubernetes_ingress_class_name`: Class name from ingress spec, if
- present.
-* `__meta_kubernetes_ingress_scheme`: Protocol scheme of ingress, `https` if TLS
- config is set. Defaults to `http`.
+* `__meta_kubernetes_ingress_label_<labelname>`: Each label from the ingress object.
+* `__meta_kubernetes_ingress_labelpresent_<labelname>`: `true` for each label from the ingress object.
+* `__meta_kubernetes_ingress_annotation_<annotationname>`: Each annotation from the ingress object.
+* `__meta_kubernetes_ingress_annotationpresent_<annotationname>`: `true` for each annotation from the ingress object.
+* `__meta_kubernetes_ingress_class_name`: Class name from ingress spec, if present.
+* `__meta_kubernetes_ingress_scheme`: Protocol scheme of ingress, `https` if TLS config is set. Defaults to `http`.
* `__meta_kubernetes_ingress_path`: Path from ingress spec. Defaults to `/`.

## Blocks

@@ -255,20 +191,19 @@ The following labels are included for discovered ingress objects:

The following blocks are supported inside the definition of `discovery.kubernetes`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-namespaces | [namespaces][] | Information about which Kubernetes namespaces to search. | no
-selectors | [selectors][] | Information about which Kubernetes namespaces to search. | no
-attach_metadata | [attach_metadata][] | Optional metadata to attach to discovered targets. | no
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-
-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+Hierarchy | Block | Description | Required
+--------------------|---------------------|----------------------------------------------------------|---------
+namespaces | [namespaces][] | Information about which Kubernetes namespaces to search. | no
+selectors | [selectors][] | Selectors to limit the discovery process to a subset of resources. | no
+attach_metadata | [attach_metadata][] | Optional metadata to attach to discovered targets. | no
+basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[namespaces]: #namespaces-block
[selectors]: #selectors-block

@@ -280,81 +215,79 @@ an `oauth2` block.

### namespaces block

-The `namespaces` block limits the namespaces to discover resources in. If
-omitted, all namespaces are searched.
+The `namespaces` block limits the namespaces to discover resources in.
+If omitted, all namespaces are searched.

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`own_namespace` | `bool` | Include the namespace {{< param "PRODUCT_NAME" >}} is running in. | | no
-`names` | `list(string)` | List of namespaces to search. | | no
+Name | Type | Description | Default | Required
+----------------|----------------|-------------------------------------------------------------------|---------|---------
+`own_namespace` | `bool` | Include the namespace {{< param "PRODUCT_NAME" >}} is running in. | | no
+`names` | `list(string)` | List of namespaces to search. | | no

### selectors block

-The `selectors` block contains optional label and field selectors to limit the
-discovery process to a subset of resources.
+The `selectors` block contains optional label and field selectors to limit the discovery process to a subset of resources.

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`role` | `string` | Role of the selector. | | yes
-`label`| `string` | Label selector string. | | no
-`field` | `string` | Field selector string. | | no
+Name | Type | Description | Default | Required
+--------|----------|------------------------|---------|---------
+`role` | `string` | Role of the selector. | | yes
+`label` | `string` | Label selector string. | | no
+`field` | `string` | Field selector string. | | no

-See Kubernetes' documentation for [Field selectors][] and [Labels and
-selectors][] to learn more about the possible filters that can be used.
+See Kubernetes' documentation for [Field selectors][] and [Labels and selectors][] to learn more about the possible filters that can be used.

-The endpoints role supports pod, service, and endpoints selectors.
-The pod role supports node selectors when configured with `attach_metadata: {node: true}`.
+The endpoints role supports Pod, service, and endpoints selectors.
+The Pod role supports node selectors when configured with `attach_metadata: {node: true}`.
Other roles only support selectors matching the role itself (e.g. node role can only contain node selectors).

-> **Note**: Using multiple `discovery.kubernetes` components with different
-> selectors may result in a bigger load against the Kubernetes API.
->
-> Selectors are recommended for retrieving a small set of resources in a very
-> large cluster. Smaller clusters are recommended to avoid selectors in favor
-> of filtering with [a `discovery.relabel` component][discovery.relabel]
-> instead.
+{{< admonition type="note" >}}
+Using multiple `discovery.kubernetes` components with different selectors may result in a bigger load against the Kubernetes API.
+
+Selectors are recommended for retrieving a small set of resources in a very large cluster.
+In smaller clusters, it's recommended to avoid selectors in favor of filtering with [a `discovery.relabel` component][discovery.relabel] instead.
+
+[discovery.relabel]: ../discovery.relabel/
+{{< /admonition >}}

[Field selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/
[Labels and selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-[discovery.relabel]: {{< relref "./discovery.relabel.md" >}}

### attach_metadata block

-The `attach_metadata` block allows to attach node metadata to discovered
-targets. Valid for roles: pod, endpoints, endpointslice.
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`node` | `bool` | Attach node metadata. | | no
+The `attach_metadata` block allows you to attach node metadata to discovered targets.
+Valid for roles: pod, endpoints, endpointslice.
+
+Name | Type | Description | Default | Required
+-------|--------|-----------------------|---------|---------
+`node` | `bool` | Attach node metadata. | | no
### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
----- | ---- | -----------
+Name      | Type                | Description
+----------|---------------------|-------------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the Kubernetes API.

## Component health

-`discovery.kubernetes` is reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.kubernetes` is reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

@@ -398,7 +331,7 @@ Replace the following:

### Kubeconfig authentication

-This example uses a kubeconfig file to authenticate to the Kubernetes API:
+This example uses a Kubeconfig file to authenticate to the Kubernetes API:

```river
discovery.kubernetes "k8s_pods" {
@@ -473,7 +406,7 @@ This configuration could be useful if you are running {{< param "PRODUCT_ROOT_NA
{{< admonition type="note" >}}
This example assumes you have used the Helm chart to deploy {{< param "PRODUCT_NAME" >}} in Kubernetes and sets `HOSTNAME` to the Kubernetes host name.
-If you have a custom Kubernetes deployment, you must adapt this example to your configuration.
+If you have a custom Kubernetes Deployment, you must adapt this example to your configuration.
{{< /admonition >}}

```river

diff --git a/docs/sources/flow/reference/components/discovery.kuma.md b/docs/sources/reference/components/discovery.kuma.md
similarity index 54%
rename from docs/sources/flow/reference/components/discovery.kuma.md
rename to docs/sources/reference/components/discovery.kuma.md
index e4eb17e69b..720a0ed19b 100644
--- a/docs/sources/flow/reference/components/discovery.kuma.md
+++ b/docs/sources/reference/components/discovery.kuma.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.kuma/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.kuma/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.kuma/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kuma/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kuma/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.kuma/
description: Learn about discovery.kuma
title: discovery.kuma
---

@@ -27,43 +22,42 @@ discovery.kuma "LABEL" {

The following arguments are supported:

-Name | Type | Description | Default | Required
------------------------- | ------------------- | -------------------------------------------------------------- | ------- | --------
-`server` | `string` | Address of the Kuma Control Plane's MADS xDS server. | | yes
-`refresh_interval` | `duration` | The time to wait between polling update requests. | `"30s"` | no
-`fetch_timeout` | `duration` | The time after which the monitoring assignments are refreshed. | `"2m"` | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+Name | Type | Description | Default | Required
+-------------------------|---------------------|----------------------------------------------------------------------------------------------------|---------|---------
+`server` | `string` | Address of the Kuma Control Plane's MADS xDS server. | | yes
+`refresh_interval` | `duration` | The time to wait between polling update requests. | `"30s"` | no
+`fetch_timeout` | `duration` | The time after which the monitoring assignments are refreshed. | `"2m"` | no
+`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
+`bearer_token` | `secret` | Bearer token to authenticate with. | | no
+`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
+`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
+`proxy_url` | `string` | HTTP proxy to send requests through. | | no
+`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
+`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
+`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no

At most, one of the following can be provided:
- [`bearer_token` argument](#arguments).
-  - [`bearer_token_file` argument](#arguments).
+- [`bearer_token_file` argument](#arguments).
- [`basic_auth` block][basic_auth].
- [`authorization` block][authorization].
- [`oauth2` block][oauth2].

-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

The following blocks are supported inside the definition of `discovery.kuma`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+Hierarchy | Block | Description | Required
+--------------------|-------------------|----------------------------------------------------------|---------
+basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no

-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[basic_auth]: #basic_auth-block
[authorization]: #authorization-block

@@ -72,19 +66,19 @@ an `oauth2` block.

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

@@ -92,11 +86,10 @@ an `oauth2` block.
The following fields are exported and can be referenced by other components:

Name | Type | Description
---------- | ------------------- | -----------
+----------|---------------------|-------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the Kuma API.

-The following meta labels are available on targets and can be used by the
-discovery.relabel component:
+The following meta labels are available on targets and can be used by the `discovery.relabel` component:
* `__meta_kuma_mesh`: the name of the proxy's Mesh
* `__meta_kuma_dataplane`: the name of the proxy
* `__meta_kuma_service`: the name of the proxy's associated Service

@@ -104,9 +97,8 @@ discovery.relabel component:

## Component health

-`discovery.kuma` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.kuma` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.lightsail.md b/docs/sources/reference/components/discovery.lightsail.md
similarity index 50%
rename from docs/sources/flow/reference/components/discovery.lightsail.md
rename to docs/sources/reference/components/discovery.lightsail.md
index 81688b35a5..c6f959ff54 100644
--- a/docs/sources/flow/reference/components/discovery.lightsail.md
+++ b/docs/sources/reference/components/discovery.lightsail.md
@@ -4,7 +4,7 @@ aliases:
- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.lightsail/
- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.lightsail/
- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.lightsail/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.lightsail/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.lightsail/
description: Learn about discovery.lightsail
title: discovery.lightsail
---

@@ -24,24 +24,24 @@ discovery.lightsail "LABEL" {

The following arguments are supported:

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`endpoint` | `string` | Custom endpoint to be used.| | no
-`region` | `string` | The AWS region. If blank, the region from the instance metadata is used. | | no
-`access_key` | `string` | The AWS API key ID. If blank, the environment variable `AWS_ACCESS_KEY_ID` is used. | | no
-`secret_key` | `string` | The AWS API key secret. If blank, the environment variable `AWS_SECRET_ACCESS_KEY` is used. | | no
-`profile` | `string` | Named AWS profile used to connect to the API. | | no
-`role_arn` | `string` | AWS Role ARN, an alternative to using AWS API keys. | | no
-`refresh_interval` | `string` | Refresh interval to re-read the instance list. | 60s | no
-`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | 80 | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+Name | Type | Description | Default | Required
+-------------------------|---------------------|---------------------------------------------------------------------------------------------------------------------------|---------|---------
+`endpoint` | `string` | Custom endpoint to be used. | | no
+`region` | `string` | The AWS region. If blank, the region from the instance metadata is used. | | no
+`access_key` | `string` | The AWS API key ID. If blank, the environment variable `AWS_ACCESS_KEY_ID` is used. | | no
+`secret_key` | `string` | The AWS API key secret. If blank, the environment variable `AWS_SECRET_ACCESS_KEY` is used. | | no
+`profile` | `string` | Named AWS profile used to connect to the API. | | no
+`role_arn` | `string` | AWS Role ARN, an alternative to using AWS API keys. | | no
+`refresh_interval` | `string` | Refresh interval to re-read the instance list. | 60s | no
+`port` | `int` | The port to scrape metrics from. If using the public IP address, this must instead be specified in the relabeling rule. | 80 | no
+`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
+`bearer_token` | `secret` | Bearer token to authenticate with. | | no
+`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
+`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
+`proxy_url` | `string` | HTTP proxy to send requests through. | | no
+`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
+`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
+`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no

At most, one of the following can be provided:
- [`bearer_token` argument](#arguments).

@@ -52,24 +52,23 @@ At most, one of the following can be provided:

- [`bearer_token_file` argument](#arguments).
- [`basic_auth` block][basic_auth].
- [`authorization` block][authorization].
- [`oauth2` block][oauth2].

[arguments]: #arguments

-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

## Blocks

The following blocks are supported inside the definition of `discovery.lightsail`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+Hierarchy | Block | Description | Required
+--------------------|-------------------|----------------------------------------------------------|---------
+basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no

-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[basic_auth]: #basic_auth-block
[authorization]: #authorization-block

@@ -78,26 +77,26 @@ an `oauth2` block.

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
----- | ---- | -----------
+Name      | Type                | Description
+----------|---------------------|-----------------------------------------
`targets` | `list(map(string))` | The set of discovered Lightsail targets.

Each target includes the following labels:

@@ -116,9 +115,8 @@ Each target includes the following labels:

## Component health

-`discovery.lightsail` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.lightsail` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.linode.md b/docs/sources/reference/components/discovery.linode.md
similarity index 65%
rename from docs/sources/flow/reference/components/discovery.linode.md
rename to docs/sources/reference/components/discovery.linode.md
index 9b0bffc553..175a241e6c 100644
--- a/docs/sources/flow/reference/components/discovery.linode.md
+++ b/docs/sources/reference/components/discovery.linode.md
@@ -1,17 +1,16 @@
---
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.linode/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.linode/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.linode/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.linode/
description: Learn about discovery.linode
title: discovery.linode
---

# discovery.linode

-`discovery.linode` allows you to retrieve scrape targets from [Linode's](https://www.linode.com/) Linode APIv4.
+`discovery.linode` allows you to retrieve scrape targets from [Linode's][] Linode APIv4.
This service discovery uses the public IPv4 address by default, but that can be changed with relabeling.

+[Linode's]: https://www.linode.com/
+

## Usage

```river
discovery.linode "LABEL" {
}
```

@@ -28,19 +27,19 @@ The linode APIv4 Token must be created with the scopes: `linodes:read_only`, `ip

The following arguments are supported:

-Name | Type | Description | Default | Required
------------------------- | ------------------- | ------------------------------------------------------------- | ------- | --------
-`refresh_interval` | `duration` | The time to wait between polling update requests. | `"60s"` | no
-`port` | `int` | Port that metrics are scraped from. | `80` | no
-`tag_separator` | `string` | The string by which Linode Instance tags are joined into the tag label. | `,` | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+Name | Type | Description | Default | Required
+-------------------------|---------------------|----------------------------------------------------------------------------------------------------|---------|---------
+`refresh_interval` | `duration` | The time to wait between polling update requests. | `"60s"` | no
+`port` | `int` | Port that metrics are scraped from. | `80` | no
+`tag_separator` | `string` | The string by which Linode Instance tags are joined into the tag label. | `,` | no
+`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
+`bearer_token` | `secret` | Bearer token to authenticate with. | | no
+`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
+`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
+`proxy_url` | `string` | HTTP proxy to send requests through. | | no
+`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
+`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
+`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no

At most, one of the following can be provided:
- [`bearer_token` argument](#arguments).
- [`bearer_token_file` argument](#arguments).
- [`basic_auth` block][basic_auth].
- [`authorization` block][authorization].
- [`oauth2` block][oauth2].
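For illustration, a minimal sketch of `discovery.linode` authenticating with a bearer token follows. This is an editorial example; the component label and the token value are placeholders:

```river
discovery.linode "example" {
  // Placeholder for a Linode APIv4 token with the required read_only scopes.
  bearer_token     = "LINODE_API_TOKEN"
  refresh_interval = "60s"
  port             = 80
}
```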
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

## Blocks

The following blocks are supported inside the definition of `discovery.linode`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+Hierarchy | Block | Description | Required
+--------------------|-------------------|----------------------------------------------------------|---------
+basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no

-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[basic_auth]: #basic_auth-block
[authorization]: #authorization-block

@@ -75,19 +73,19 @@ an `oauth2` block.

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

@@ -95,11 +93,10 @@ an `oauth2` block.

The following fields are exported and can be referenced by other components:

Name | Type | Description
---------- | ------------------- | -----------
+----------|---------------------|---------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the Linode API.
-The following meta labels are available on targets and can be used by the
-discovery.relabel component:
+The following meta labels are available on targets and can be used by the `discovery.relabel` component:
* `__meta_linode_instance_id`: the id of the Linode instance
* `__meta_linode_instance_label`: the label of the Linode instance

@@ -122,9 +119,8 @@ discovery.relabel component:

## Component health

-`discovery.linode` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.linode` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.marathon.md b/docs/sources/reference/components/discovery.marathon.md
similarity index 69%
rename from docs/sources/flow/reference/components/discovery.marathon.md
rename to docs/sources/reference/components/discovery.marathon.md
index 69e8630b04..b13f4d728c 100644
--- a/docs/sources/flow/reference/components/discovery.marathon.md
+++ b/docs/sources/reference/components/discovery.marathon.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.marathon/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.marathon/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.marathon/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.marathon/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.marathon/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.marathon/
description: Learn about discovery.marathon
title: discovery.marathon
---

@@ -25,20 +20,20 @@ discovery.marathon "LABEL" {

The following arguments are supported:

-Name | Type | Description | Default | Required
------------------------- | ------------------- | ------------------------------------------------------------- | ------- | --------
-`servers` | `list(string)` | List of Marathon servers. | | yes
-`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `"30s"` | no
-`auth_token` | `secret` | Auth token to authenticate with. | | no
-`auth_token_file` | `string` | File containing an auth token to authenticate with. | | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+Name | Type | Description | Default | Required
+-------------------------|---------------------|----------------------------------------------------------------------------------------------------|---------|---------
+`servers` | `list(string)` | List of Marathon servers. | | yes
| | yes +`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `"30s"` | no +`auth_token` | `secret` | Auth token to authenticate with. | | no +`auth_token_file` | `string` | File containing an auth token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`auth_token` argument](#arguments). @@ -51,7 +46,7 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks @@ -66,9 +61,8 @@ The following blocks are supported inside the definition of | oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | | tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -77,19 +71,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -111,9 +105,8 @@ Each target includes the following labels: ## Component health -`discovery.marathon` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.marathon` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. 
## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.nerve.md b/docs/sources/reference/components/discovery.nerve.md
similarity index 86%
rename from docs/sources/flow/reference/components/discovery.nerve.md
rename to docs/sources/reference/components/discovery.nerve.md
index 04812c356b..9b7ebca64b 100644
--- a/docs/sources/flow/reference/components/discovery.nerve.md
+++ b/docs/sources/reference/components/discovery.nerve.md
@@ -1,8 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.nerve/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.nerve/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.nerve/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.nerve/
description: Learn about discovery.nerve
title: discovery.nerve
---
@@ -33,13 +30,11 @@ Name | Type | Description | Def
`timeout` | `duration` | The timeout to use. | `"10s"` | no

-Each element in the `path` list can either point to a single service, or to the
-root of a tree of services.
+Each element in the `path` list can either point to a single service, or to the root of a tree of services.

## Blocks

-The `discovery.nerve` component does not support any blocks, and is configured
-fully through arguments.
+The `discovery.nerve` component doesn't support any blocks, and is configured fully through arguments.

## Exported fields

@@ -49,8 +44,7 @@ Name | Type | Description
--------- | ------------------- | -----------
`targets` | `list(map(string))` | The set of targets discovered from Nerve's API.

-The following meta labels are available on targets and can be used by the
-discovery.relabel component
+The following meta labels are available on targets and can be used by the `discovery.relabel` component:
* `__meta_nerve_path`: the full path to the endpoint node in Zookeeper
* `__meta_nerve_endpoint_host`: the host of the endpoint
* `__meta_nerve_endpoint_port`: the port of the endpoint
@@ -58,9 +52,8 @@ discovery.relabel component

## Component health

-`discovery.nerve` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.nerve` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.
## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.nomad.md b/docs/sources/reference/components/discovery.nomad.md
similarity index 53%
rename from docs/sources/flow/reference/components/discovery.nomad.md
rename to docs/sources/reference/components/discovery.nomad.md
index 372306a4e2..71eeb221ee 100644
--- a/docs/sources/flow/reference/components/discovery.nomad.md
+++ b/docs/sources/reference/components/discovery.nomad.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.nomad/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.nomad/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.nomad/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.nomad/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.nomad/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.nomad/
description: Learn about discovery.nomad
title: discovery.nomad
---
@@ -24,50 +19,49 @@ discovery.nomad "LABEL" {

The following arguments are supported:

-Name | Type | Description | Default | Required
------------------------- | ------------------- | ------------------------------------------------------------- | ----------------------- | --------
-`server` | `string` | Address of nomad server. | `http://localhost:4646` | no
-`namespace` | `string` | Nomad namespace to use. | `default` | no
-`region` | `string` | Nomad region to use. | `global` | no
-`allow_stale` | `bool` | Allow reading from non-leader nomad instances. | `true` | no
-`tag_separator` | `string` | Seperator to join nomad tags into Prometheus labels. | `,` | no
-`refresh_interval` | `duration` | Frequency to refresh list of containers. | `"30s"` | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+Name                     | Type                | Description                                                                                        | Default                 | Required
+-------------------------|---------------------|----------------------------------------------------------------------------------------------------|-------------------------|---------
+`server`                 | `string`            | Address of Nomad server.                                                                           | `http://localhost:4646` | no
+`namespace`              | `string`            | Nomad namespace to use.                                                                            | `default`               | no
+`region`                 | `string`            | Nomad region to use.                                                                               | `global`                | no
+`allow_stale`            | `bool`              | Allow reading from non-leader Nomad instances.                                                     | `true`                  | no
+`tag_separator`          | `string`            | Separator to join Nomad tags into Prometheus labels.                                               | `,`                     | no
+`refresh_interval`       | `duration`          | Frequency to refresh list of containers.                                                           | `"30s"`                 | no
+`bearer_token_file`      | `string`            | File containing a bearer token to authenticate with.                                               |                         | no
+`bearer_token`           | `secret`            | Bearer token to authenticate with.
| | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). - - [`bearer_token_file` argument](#arguments). + - [`bearer_token_file` argument](#arguments). - [`basic_auth` block][basic_auth]. - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.nomad`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -76,26 +70,26 @@ an `oauth2` block. 
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|----------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the nomad server. Each target includes the following labels: @@ -112,9 +106,8 @@ Each target includes the following labels: ## Component health -`discovery.nomad` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.nomad` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.openstack.md b/docs/sources/reference/components/discovery.openstack.md similarity index 54% rename from docs/sources/flow/reference/components/discovery.openstack.md rename to docs/sources/reference/components/discovery.openstack.md index 6d26908602..894c66101a 100644 --- a/docs/sources/flow/reference/components/discovery.openstack.md +++ b/docs/sources/reference/components/discovery.openstack.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.openstack/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.openstack/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.openstack/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.openstack/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.openstack/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.openstack/ description: Learn about discovery.openstack title: discovery.openstack --- @@ -28,33 +23,35 @@ discovery.openstack "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required -------------------- | ---------- | ---------------------------------------------------------------------- | -------------------- | -------- -`role` | `string` | Role of the discovered targets. | | yes -`region` | `string` | OpenStack region. | | yes -`identity_endpoint` | `string` | Specifies the HTTP endpoint that is required to work with te Identity API of the appropriate version | | no -`username` | `string` | OpenStack username for the Identity V2 and V3 APIs. | | no -`userid` | `string` | OpenStack userid for the Identity V2 and V3 APIs. 
| | no
-`password` | `secret` | Password for the Identity V2 and V3 APIs. | | no
-`domain_name` | `string` | OpenStack domain name for the Identity V2 and V3 APIs. | | no
-`domain_id` | `string` | OpenStack domain ID for the Identity V2 and V3 APIs. | | no
-`project_name` | `string` | OpenStack project name for the Identity V2 and V3 APIs. | | no
-`project_id` | `string` | OpenStack project ID for the Identity V2 and V3 APIs. | | no
-`application_credential_name` | `string` | OpenStack application credential name for the Identity V2 and V3 APIs. | | no
-`application_credential_id` | `string` | OpenStack application credential ID for the Identity V2 and V3 APIs. | | no
-`application_credential_secret` | `secret` | OpenStack application credential secret for the Identity V2 and V3 APIs. | | no
-`all_tenants` | `bool` | Whether the service discovery should list all instances for all projects. | `false` | no
-`refresh_interval` | `duration`| Refresh interval to re-read the instance list. | `60s` | no
-`port` | `int` | The port to scrape metrics from. | `80` | no
-`availability` | `string` | The availability of the endpoint to connect to. | `public` | no
+Name                            | Type       | Description                                                                                             | Default  | Required
+--------------------------------|------------|---------------------------------------------------------------------------------------------------------|----------|---------
+`role`                          | `string`   | Role of the discovered targets.                                                                         |          | yes
+`region`                        | `string`   | OpenStack region.                                                                                       |          | yes
+`identity_endpoint`             | `string`   | Specifies the HTTP endpoint that is required to work with the Identity API of the appropriate version. |          | no
+`username`                      | `string`   | OpenStack username for the Identity V2 and V3 APIs.                                                     |          | no
+`userid`                        | `string`   | OpenStack userid for the Identity V2 and V3 APIs.                                                       |          | no
+`password`                      | `secret`   | Password for the Identity V2 and V3 APIs.                                                               |          | no
+`domain_name`                   | `string`   | OpenStack domain name for the Identity V2 and V3 APIs.                                                  |          | no
+`domain_id`                     | `string`   | OpenStack domain ID for the Identity V2 and V3 APIs.                                                    |          | no
+`project_name`                  | `string`   | OpenStack project name for the Identity V2 and V3 APIs.                                                 |          | no
+`project_id`                    | `string`   | OpenStack project ID for the Identity V2 and V3 APIs.                                                   |          | no
+`application_credential_name`   | `string`   | OpenStack application credential name for the Identity V2 and V3 APIs.                                  |          | no
+`application_credential_id`     | `string`   | OpenStack application credential ID for the Identity V2 and V3 APIs.                                    |          | no
+`application_credential_secret` | `secret`   | OpenStack application credential secret for the Identity V2 and V3 APIs.                                |          | no
+`all_tenants`                   | `bool`     | Whether the service discovery should list all instances for all projects.                               | `false`  | no
+`refresh_interval`              | `duration` | Refresh interval to re-read the instance list.                                                          | `60s`    | no
+`port`                          | `int`      | The port to scrape metrics from.                                                                        | `80`     | no
+`availability`                  | `string`   | The availability of the endpoint to connect to.                                                         | `public` | no

`role` must be one of `hypervisor` or `instance`.

`username` is required if using Identity V2 API. In Identity V3, either `userid` or a combination of `username` and `domain_id` or `domain_name` are needed.

-`project_id` and `project_name` fields are optional for the Identity V2 API. Some providers allow you to specify a `project_name` instead of the `project_id`. Some require both.
+`project_id` and `project_name` fields are optional for the Identity V2 API.
+Some providers allow you to specify a `project_name` instead of the `project_id`. Some require both.
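+
+For example, a minimal sketch of authenticating with the Identity V3 API using a username, domain, and project name might look like the following. The endpoint, region, credentials, and project name are placeholder values for illustration:
+
+```river
+discovery.openstack "example" {
+  role   = "instance"
+  region = "RegionOne"
+
+  // Identity V3 password authentication. Replace these placeholders
+  // with the values for your OpenStack deployment.
+  identity_endpoint = "https://openstack.example.com:5000/v3/"
+  username          = "USERNAME"
+  domain_name       = "Default"
+  password          = "PASSWORD"
+  project_name      = "PROJECT_NAME"
+}
+```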
-`application_credential_id` or `application_credential_name` fields are required if using an application credential to authenticate. Some providers allow you to create an application credential to authenticate rather than a password. +`application_credential_id` or `application_credential_name` fields are required if using an application credential to authenticate. +Some providers allow you to create an application credential to authenticate rather than a password. `application_credential_secret` field is required if using an application credential to authenticate. @@ -65,22 +62,22 @@ Name | Type | Description ## Blocks The following blocks are supported inside the definition of `discovery.openstack`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- +Hierarchy | Block | Description | Required +-----------|----------------|------------------------------------------------------|--------- tls_config | [tls_config][] | TLS configuration for requests to the OpenStack API. | no [tls_config]: #tls_config-block ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|------------------------------------------------------ `targets` | `list(map(string))` | The set of targets discovered from the OpenStack API. #### `hypervisor` @@ -115,9 +112,8 @@ interface. ## Component health -`discovery.openstack` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.openstack` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.ovhcloud.md b/docs/sources/reference/components/discovery.ovhcloud.md similarity index 88% rename from docs/sources/flow/reference/components/discovery.ovhcloud.md rename to docs/sources/reference/components/discovery.ovhcloud.md index 2733256ee1..5582f9d121 100644 --- a/docs/sources/flow/reference/components/discovery.ovhcloud.md +++ b/docs/sources/reference/components/discovery.ovhcloud.md @@ -1,20 +1,15 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.ovhcloud/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.ovhcloud/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.ovhcloud/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.ovhcloud/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.ovhcloud/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.ovhcloud/ description: Learn about discovery.ovhcloud title: discovery.ovhcloud --- # discovery.ovhcloud -`discovery.ovhcloud` discovers scrape targets from OVHcloud's [dedicated servers][] and [VPS][] using their [API][]. -{{< param "PRODUCT_ROOT_NAME" >}} will periodically check the REST endpoint and create a target for every discovered server. -The public IPv4 address will be used by default - if there's none, the IPv6 address will be used. 
-This may be changed via relabeling with `discovery.relabel`.
+`discovery.ovhcloud` discovers scrape targets from OVHcloud's [dedicated servers][] and [VPS][] using their [API][].
+{{< param "PRODUCT_ROOT_NAME" >}} periodically checks the REST endpoint and creates a target for every discovered server.
+The public IPv4 address is used by default. If there's none, the IPv6 address is used.
+You can change this via relabeling with `discovery.relabel`.
For OVHcloud's [public cloud][] instances you can use `discovery.openstack`.

[API]: https://api.ovh.com/
@@ -57,7 +52,7 @@ service | `string` | Service of the targets to retrieve.
The following fields are exported and can be referenced by other components:

Name | Type | Description
---------- | ------------------- | -----------
+----------|---------------------|-----------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the OVHcloud API.

Multiple meta labels are available on `targets` and can be used by the `discovery.relabel` component.
@@ -99,9 +94,8 @@ Multiple meta labels are available on `targets` and can be used by the `discover

## Component health

-`discovery.ovhcloud` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.ovhcloud` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.process.md b/docs/sources/reference/components/discovery.process.md
similarity index 83%
rename from docs/sources/flow/reference/components/discovery.process.md
rename to docs/sources/reference/components/discovery.process.md
index 6749abe65a..12df00f41f 100644
--- a/docs/sources/flow/reference/components/discovery.process.md
+++ b/docs/sources/reference/components/discovery.process.md
@@ -1,17 +1,12 @@
---
-aliases:
-  - /docs/grafana-cloud/agent/flow/reference/components/discovery.process/
-  - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.process/
-  - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.process/
-  - /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.process/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.process/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.process/
description: Learn about discovery.process
title: discovery.process
---

# discovery.process

-{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}}

`discovery.process` discovers processes running on the local Linux OS.

@@ -31,10 +26,10 @@ discovery.process "LABEL" {

The following arguments are supported:

-| Name               | Type                | Description                                                                               | Default | Required |
-|--------------------|---------------------|-------------------------------------------------------------------------------------------|---------|----------|
+| Name               | Type                | Description                                                                                | Default | Required |
+|--------------------|---------------------|--------------------------------------------------------------------------------------------|---------|----------|
| `join`             | `list(map(string))` | Join external targets to discovered process targets based on `__container_id__` label.
| | no |
-| `refresh_interval` | `duration`          | How often to sync targets. | "60s" | no |
+| `refresh_interval` | `duration`          | How often to sync targets.                                                                 | `"60s"` | no |

### Targets joining

@@ -97,8 +92,8 @@ The resulting targets are:

The following blocks are supported inside the definition of `discovery.process`:

-| Hierarchy       | Block               | Description                                    | Required |
-|-----------------|---------------------|-------------------------------------------------|----------|
+| Hierarchy       | Block               | Description                                     | Required |
+|-----------------|---------------------|-------------------------------------------------|----------|
| discover_config | [discover_config][] | Configures which process metadata to discover.  | no       |

[discover_config]: #discover_config-block
@@ -109,13 +104,13 @@ The `discover_config` block describes which process metadata to discover.

The following arguments are supported:

-| Name           | Type   | Description                                                       | Default | Required |
-|----------------|--------|-------------------------------------------------------------------|---------|----------|
-| `exe`          | `bool` | A flag to enable discovering `__meta_process_exe` label.          | true    | no       |
+| Name           | Type   | Description                                                       | Default | Required |
+|----------------|--------|--------------------------------------------------------------------|---------|----------|
+| `exe`          | `bool` | A flag to enable discovering `__meta_process_exe` label.          | true    | no       |
| `cwd`          | `bool` | A flag to enable discovering `__meta_process_cwd` label.          | true    | no       |
| `commandline`  | `bool` | A flag to enable discovering `__meta_process_commandline` label.  | true    | no       |
| `uid`          | `bool` | A flag to enable discovering `__meta_process_uid` label.          | true    | no       |
-| `username`     | `bool` | A flag to enable discovering `__meta_process_username`: label.   | true    | no       |
+| `username`     | `bool` | A flag to enable discovering `__meta_process_username` label.     | true    | no       |
| `container_id` | `bool` | A flag to enable discovering `__container_id__` label.            | true    | no       |

## Exported fields

@@ -134,14 +129,12 @@ Each target includes the following labels:

* `__meta_process_commandline`: The process command line. Taken from `/proc/<pid>/cmdline`.
* `__meta_process_uid`: The process UID. Taken from `/proc/<pid>/status`.
* `__meta_process_username`: The process username. Taken from `__meta_process_uid` and `os/user/LookupID`.
-* `__container_id__`: The container ID. Taken from `/proc/<pid>/cgroup`. If the process is not running in a container,
-  this label is not set.
+* `__container_id__`: The container ID. Taken from `/proc/<pid>/cgroup`. If the process isn't running in a container, this label isn't set.

## Component health

-`discovery.process` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.process` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.
## Debug information diff --git a/docs/sources/flow/reference/components/discovery.puppetdb.md b/docs/sources/reference/components/discovery.puppetdb.md similarity index 75% rename from docs/sources/flow/reference/components/discovery.puppetdb.md rename to docs/sources/reference/components/discovery.puppetdb.md index 01e0ac9269..3ef7fc40e3 100644 --- a/docs/sources/flow/reference/components/discovery.puppetdb.md +++ b/docs/sources/reference/components/discovery.puppetdb.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.puppetdb/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.puppetdb/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.puppetdb/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.puppetdb/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.puppetdb/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.puppetdb/ description: Learn about discovery.puppetdb title: discovery.puppetdb --- @@ -49,31 +44,30 @@ Name | Type | Description At most, one of the following can be provided: - [`bearer_token` argument](#arguments). - - [`bearer_token_file` argument](#arguments). + - [`bearer_token_file` argument](#arguments). - [`basic_auth` block][basic_auth]. - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.puppetdb`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -authorization | [authorization][] | Configure generic authorization to the endpoint. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|----------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -The `>` symbol indicates deeper levels of nesting. For example, -`oauth2 > tls_config` refers to a `tls_config` block defined inside -an `oauth2` block. +The `>` symbol indicates deeper levels of nesting. +For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. [basic_auth]: #basic_auth-block [authorization]: #authorization-block @@ -82,26 +76,26 @@ an `oauth2` block. 
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|---------------------|--------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from puppetdb. Each target includes the following labels: @@ -119,9 +113,8 @@ Each target includes the following labels: ## Component health -`discovery.puppetdb` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.puppetdb` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.relabel.md b/docs/sources/reference/components/discovery.relabel.md similarity index 53% rename from docs/sources/flow/reference/components/discovery.relabel.md rename to docs/sources/reference/components/discovery.relabel.md index cd928ffb5a..2cb341cfdb 100644 --- a/docs/sources/flow/reference/components/discovery.relabel.md +++ b/docs/sources/reference/components/discovery.relabel.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.relabel/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.relabel/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.relabel/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.relabel/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.relabel/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/ description: Learn about discovery.relabel title: discovery.relabel --- @@ -13,29 +8,20 @@ title: discovery.relabel In Flow, targets are defined as sets of key-value pairs called _labels_. -`discovery.relabel` rewrites the label set of the input targets by applying one -or more relabeling rules. If no rules are defined, then the input targets are -exported as-is. - -The most common use of `discovery.relabel` is to filter targets or standardize -the target label set that is passed to a downstream component. The `rule` -blocks are applied to the label set of each target in order of their appearance -in the configuration file. The configured rules can be retrieved by calling the -function in the `rules` export field. 
-
-Target labels which start with a double underscore `__` are considered
-internal, and may be removed by other Flow components prior to telemetry
-collection. To retain any of these labels, use a `labelmap` action to remove
-the prefix, or remap them to a different name. Service discovery mechanisms
-usually group their labels under `__meta_*`. For example, the
-discovery.kubernetes component populates a set of `__meta_kubernetes_*` labels
-to provide information about the discovered Kubernetes resources. If a
-relabeling rule needs to store a label value temporarily, for example as the
-input to a subsequent step, use the `__tmp` label name prefix, as it is
-guaranteed to never be used.
-
-Multiple `discovery.relabel` components can be specified by giving them
-different labels.
+`discovery.relabel` rewrites the label set of the input targets by applying one or more relabeling rules.
+If no rules are defined, then the input targets are exported as-is.
+
+The most common use of `discovery.relabel` is to filter targets or standardize the target label set that's passed to a downstream component.
+The `rule` blocks are applied to the label set of each target in order of their appearance in the configuration file.
+The configured rules can be retrieved from the `rules` export field.
+
+Target labels which start with a double underscore `__` are considered internal, and may be removed by other components prior to telemetry collection.
+To retain any of these labels, use a `labelmap` action to remove the prefix, or remap them to a different name.
+Service discovery mechanisms usually group their labels under `__meta_*`.
+For example, the `discovery.kubernetes` component populates a set of `__meta_kubernetes_*` labels to provide information about the discovered Kubernetes resources.
+If a relabeling rule needs to store a label value temporarily, for example as the input to a subsequent step, use the `__tmp` label name prefix, as it's guaranteed to never be used.
+
+Multiple `discovery.relabel` components can be specified by giving them different labels.

## Usage

@@ -55,39 +41,38 @@ discovery.relabel "LABEL" {

The following arguments are supported:

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`targets` | `list(map(string))` | Targets to relabel | | yes
+Name      | Type                | Description         | Default | Required
+----------|---------------------|---------------------|---------|---------
+`targets` | `list(map(string))` | Targets to relabel. |         | yes

## Blocks

The following blocks are supported inside the definition of `discovery.relabel`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-rule | [rule][] | Relabeling rules to apply to targets. | no
+Hierarchy | Block    | Description                           | Required
+----------|----------|---------------------------------------|---------
+rule      | [rule][] | Relabeling rules to apply to targets. | no

[rule]: #rule-block

### rule block

-{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/rule-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
----- | ---- | -----------
+Name     | Type                | Description
+---------|---------------------|----------------------------------------------
`output` | `list(map(string))` | The set of targets after applying relabeling.
-`rules` | `RelabelRules` | The currently configured relabeling rules. +`rules` | `RelabelRules` | The currently configured relabeling rules. ## Component health -`discovery.relabel` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.relabel` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -122,7 +107,6 @@ discovery.relabel "keep_backend_only" { } ``` - ## Compatible components diff --git a/docs/sources/flow/reference/components/discovery.scaleway.md b/docs/sources/reference/components/discovery.scaleway.md similarity index 55% rename from docs/sources/flow/reference/components/discovery.scaleway.md rename to docs/sources/reference/components/discovery.scaleway.md index 44c1810118..9f36c82e33 100644 --- a/docs/sources/flow/reference/components/discovery.scaleway.md +++ b/docs/sources/reference/components/discovery.scaleway.md @@ -1,16 +1,12 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.scaleway/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.scaleway/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.scaleway/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.scaleway/ description: Learn about discovery.scaleway title: discovery.scaleway --- # discovery.scaleway -`discovery.scaleway` discovers targets from [Scaleway instances][instance] and -[baremetal services][baremetal]. +`discovery.scaleway` discovers targets from [Scaleway instances][instance] and [baremetal services][baremetal]. [instance]: https://www.scaleway.com/en/virtual-instances/ [baremetal]: https://www.scaleway.com/en/bare-metal-servers/ @@ -30,64 +26,61 @@ discovery.scaleway "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`project_id` | `string` | Scaleway project ID of targets. | | yes -`role` | `string` | Role of targets to retrieve. | | yes -`api_url` | `string` | Scaleway API URL. | `"https://api.scaleway.com"` | no -`zone` | `string` | Availability zone of targets. | `"fr-par-1"` | no -`access_key` | `string` | Access key for the Scaleway API. | | yes -`secret_key` | `secret` | Secret key for the Scaleway API. | | conditional -`secret_key_file` | `string` | Path to file containing secret key for the Scaleway API. | | conditional -`name_filter` | `string` | Name filter to apply against the listing request. | | no -`tags_filter` | `list(string)` | List of tags to search for. | | no -`refresh_interval` | `duration` | Frequency to rediscover targets. | `"60s"` | no -`port` | `number` | Default port on servers to associate with generated targets. | `80` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. 
| `true` | no
-
-The `role` argument determines what type of Scaleway machines to discover. It
-must be set to one of the following:
+Name                     | Type                | Description                                                                                        | Default                      | Required
+-------------------------|---------------------|----------------------------------------------------------------------------------------------------|------------------------------|------------
+`project_id`             | `string`            | Scaleway project ID of targets.                                                                    |                              | yes
+`role`                   | `string`            | Role of targets to retrieve.                                                                       |                              | yes
+`api_url`                | `string`            | Scaleway API URL.                                                                                  | `"https://api.scaleway.com"` | no
+`zone`                   | `string`            | Availability zone of targets.                                                                      | `"fr-par-1"`                 | no
+`access_key`             | `string`            | Access key for the Scaleway API.                                                                   |                              | yes
+`secret_key`             | `secret`            | Secret key for the Scaleway API.                                                                   |                              | conditional
+`secret_key_file`        | `string`            | Path to file containing secret key for the Scaleway API.                                           |                              | conditional
+`name_filter`            | `string`            | Name filter to apply against the listing request.                                                  |                              | no
+`tags_filter`            | `list(string)`      | List of tags to search for.                                                                        |                              | no
+`refresh_interval`       | `duration`          | Frequency to rediscover targets.                                                                   | `"60s"`                      | no
+`port`                   | `number`            | Default port on servers to associate with generated targets.                                       | `80`                         | no
+`proxy_url`              | `string`            | HTTP proxy to send requests through.                                                               |                              | no
+`no_proxy`               | `string`            | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying.   |                              | no
+`proxy_from_environment` | `bool`              | Use the proxy URL indicated by environment variables.                                              | `false`                      | no
+`proxy_connect_header`   | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests.                                      |                              | no
+`follow_redirects`       | `bool`              | Whether redirects returned by the server should be followed.                                       | `true`                       | no
+`enable_http2`           | `bool`              | Whether HTTP2 is supported for requests.                                                           | `true`                       | no
+
+The `role` argument determines what type of Scaleway machines to discover.
+It must be set to one of the following:

* `"baremetal"`: Discover [baremetal][] Scaleway machines.
* `"instance"`: Discover virtual Scaleway [instances][instance].

-The `name_filter` and `tags_filter` arguments can be used to filter the set of
-discovered servers. `name_filter` returns machines matching a specific name,
-while `tags_filter` returns machines who contain _all_ the tags listed in the
-`tags_filter` argument.
+The `name_filter` and `tags_filter` arguments can be used to filter the set of discovered servers.
+`name_filter` returns machines matching a specific name, while `tags_filter` returns machines that contain _all_ the tags listed in the `tags_filter` argument.

-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

## Blocks

The following blocks are supported inside the definition of
`discovery.scaleway`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
+Hierarchy  | Block          | Description                                             | Required
+-----------|----------------|---------------------------------------------------------|---------
tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint.  | no

-The `>` symbol indicates deeper levels of nesting. For example,
-`oauth2 > tls_config` refers to a `tls_config` block defined inside
-an `oauth2` block.
+The `>` symbol indicates deeper levels of nesting.
+For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block.

[tls_config]: #tls_config-block

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

Name | Type | Description
---- | ---- | -----------
+----------|---------------------|-----------------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the Scaleway API.

When `role` is `baremetal`, discovered targets include the following labels:

@@ -131,9 +124,8 @@ When `role` is `instance`, discovered targets include the following labels:

## Component health

-`discovery.scaleway` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.scaleway` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

diff --git a/docs/sources/flow/reference/components/discovery.serverset.md b/docs/sources/reference/components/discovery.serverset.md
similarity index 76%
rename from docs/sources/flow/reference/components/discovery.serverset.md
rename to docs/sources/reference/components/discovery.serverset.md
index bf45a1d79a..e693c9f3cf 100644
--- a/docs/sources/flow/reference/components/discovery.serverset.md
+++ b/docs/sources/reference/components/discovery.serverset.md
@@ -1,8 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.serverset/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.serverset/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.serverset/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.serverset/
description: Learn about discovery.serverset
title: discovery.serverset
---
@@ -25,7 +22,8 @@ discovery.serverset "LABEL" {
}
```

-Serverset data stored in Zookeeper must be in JSON format. The Thrift format is not supported.
+Serverset data stored in Zookeeper must be in JSON format.
+The Thrift format isn't supported.

## Arguments

The following arguments are supported:

| Name      | Type           | Description                                       | Default | Required |
|-----------|----------------|---------------------------------------------------|---------|----------|
-| `servers` | `list(string)` | The Zookeeper servers to connect to. | | yes |
+| `servers` | `list(string)` | The Zookeeper servers to connect to.              |         | yes      |
| `paths`   | `list(string)` | The Zookeeper paths to discover Serversets from.  |         | yes      |
-| `timeout` | `duration` | The Zookeeper session timeout | `10s` | no |
+| `timeout` | `duration`     | The Zookeeper session timeout.                    | `10s`   | no       |

## Exported fields

The following fields are exported and can be referenced by other components:

Name | Type | Description
---------- | ------------------- | -----------
+----------|---------------------|-------------------------------
`targets` | `list(map(string))` | The set of targets discovered.
The following metadata labels are available on targets during relabeling:

@@ -56,9 +54,8 @@ The following metadata labels are available on targets during relabeling:

## Component health

-`discovery.serverset` is only reported as unhealthy when given an invalid
-configuration. In those cases, exported fields retain their last healthy
-values.
+`discovery.serverset` is only reported as unhealthy when given an invalid configuration.
+In those cases, exported fields retain their last healthy values.

## Debug information

@@ -70,12 +67,8 @@ values.

## Example

-The configuration below will connect to one of the Zookeeper servers
-(either `zk1`, `zk2`, or `zk3`) and discover JSON Serversets at paths
-`/path/to/znode1` and `/path/to/znode2`. The discovered targets are scraped
-by the `prometheus.scrape.default` component and forwarded to
-the `prometheus.remote_write.default` component, which will send the samples to
-specified remote_write URL.
+The configuration below connects to one of the Zookeeper servers (either `zk1`, `zk2`, or `zk3`) and discovers JSON Serversets at paths `/path/to/znode1` and `/path/to/znode2`.
+The discovered targets are scraped by the `prometheus.scrape.default` component and forwarded to the `prometheus.remote_write.default` component, which sends the samples to the specified remote_write URL.

```river
discovery.serverset "zookeeper" {

diff --git a/docs/sources/flow/reference/components/discovery.triton.md b/docs/sources/reference/components/discovery.triton.md
similarity index 83%
rename from docs/sources/flow/reference/components/discovery.triton.md
rename to docs/sources/reference/components/discovery.triton.md
index d9e3ac6a23..82578eee6f 100644
--- a/docs/sources/flow/reference/components/discovery.triton.md
+++ b/docs/sources/reference/components/discovery.triton.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/discovery.triton/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.triton/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.triton/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.triton/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.triton/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.triton/
description: Learn about discovery.triton
title: discovery.triton
---
@@ -44,29 +39,29 @@ Name | Type | Description

* `"container"` to discover virtual machines (SmartOS zones, lx/KVM/bhyve branded zones) running on Triton
* `"cn"` to discover compute nodes (servers/global zones) making up the Triton infrastructure

-`groups` is only supported when `role` is set to `"container"`. If omitted all
-containers owned by the requesting account are scraped.
+`groups` is only supported when `role` is set to `"container"`.
+If omitted, all containers owned by the requesting account are scraped.

## Blocks

The following blocks are supported inside the definition of `discovery.triton`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
+Hierarchy  | Block          | Description                                         | Required
+-----------|----------------|-----------------------------------------------------|---------
tls_config | [tls_config][] | TLS configuration for requests to the Triton API.
| no [tls_config]: #tls_config-block ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|--------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Triton API. When `role` is set to `"container"`, each target includes the following labels: @@ -85,9 +80,8 @@ When `role` is set to `"cn"` each target includes the following labels: ## Component health -`discovery.triton` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`discovery.triton` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information diff --git a/docs/sources/flow/reference/components/discovery.uyuni.md b/docs/sources/reference/components/discovery.uyuni.md similarity index 62% rename from docs/sources/flow/reference/components/discovery.uyuni.md rename to docs/sources/reference/components/discovery.uyuni.md index ab2a968bb5..8cbd7d6870 100644 --- a/docs/sources/flow/reference/components/discovery.uyuni.md +++ b/docs/sources/reference/components/discovery.uyuni.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/discovery.uyuni/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.uyuni/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.uyuni/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.uyuni/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.uyuni/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/discovery.uyuni/ description: Learn about discovery.uyuni title: discovery.uyuni --- @@ -29,43 +24,43 @@ discovery.uyuni "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------------- | ----------------------- | -------- -`server` | `string` | The primary Uyuni Server. | | yes -`username` | `string` | The username to use for authentication to the Uyuni API. | | yes -`password` | `Secret` | The password to use for authentication to the Uyuni API. | | yes -`entitlement` | `string` | The entitlement to filter on when listing targets. | `"monitoring_entitled"` | no -`separator` | `string` | The separator to use when building the `__meta_uyuni_groups` label. | `","` | no -`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `1m` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. 
| `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no - -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|-------------------------|--------- +`server` | `string` | The primary Uyuni Server. | | yes +`username` | `string` | The username to use for authentication to the Uyuni API. | | yes +`password` | `Secret` | The password to use for authentication to the Uyuni API. | | yes +`entitlement` | `string` | The entitlement to filter on when listing targets. | `"monitoring_entitled"` | no +`separator` | `string` | The separator to use when building the `__meta_uyuni_groups` label. | `","` | no +`refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `1m` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no + +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ## Blocks The following blocks are supported inside the definition of `discovery.uyuni`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- +Hierarchy | Block | Description | Required +-----------|----------------|--------------------------------------------------|--------- tls_config | [tls_config][] | TLS configuration for requests to the Uyuni API. | no [tls_config]: #tls_config-block ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: Name | Type | Description ---------- | ------------------- | ----------- +----------|---------------------|-------------------------------------------------- `targets` | `list(map(string))` | The set of targets discovered from the Uyuni API. Each target includes the following labels: @@ -80,14 +75,14 @@ Each target includes the following labels: * `__meta_uyuni_metrics_path`: The path to the metrics endpoint. * `__meta_uyuni_scheme`: `https` if TLS is enabled on the endpoint, `http` otherwise. -These labels are largely derived from a [listEndpoints](https://www.uyuni-project.org/uyuni-docs-api/uyuni/api/system.monitoring.html) -API call to the Uyuni Server. +These labels are largely derived from a [listEndpoints][] API call to the Uyuni Server. + +[listEndpoints]: https://www.uyuni-project.org/uyuni-docs-api/uyuni/api/system.monitoring.html ## Component health -`discovery.uyuni` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. 
+`discovery.uyuni` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -128,6 +123,7 @@ Replace the following: - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. + ## Compatible components diff --git a/docs/sources/reference/components/faro.receiver.md b/docs/sources/reference/components/faro.receiver.md new file mode 100644 index 0000000000..a81744e1e5 --- /dev/null +++ b/docs/sources/reference/components/faro.receiver.md @@ -0,0 +1,258 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/reference/components/faro.receiver/ +description: Learn about the faro.receiver +title: faro.receiver +--- + +# faro.receiver + +`faro.receiver` accepts web application telemetry data from the [Grafana Faro Web SDK][faro-sdk] and forwards it to other components for future processing. + +[faro-sdk]: https://github.com/grafana/faro-web-sdk + +## Usage + +```river +faro.receiver "LABEL" { + output { + logs = [LOKI_RECEIVERS] + traces = [OTELCOL_COMPONENTS] + } +} +``` + +## Arguments + +The following arguments are supported: + +Name | Type | Description | Default | Required +-------------------|---------------|----------------------------------------------|---------|--------- +`extra_log_labels` | `map(string)` | Extra labels to attach to emitted log lines. | `{}` | no + +## Blocks + +The following blocks are supported inside the definition of `faro.receiver`: + +Hierarchy | Block | Description | Required +-----------------------|-------------------|------------------------------------------------------|--------- +server | [server][] | Configures the HTTP server. | no +server > rate_limiting | [rate_limiting][] | Configures rate limiting for the HTTP server. | no +sourcemaps | [sourcemaps][] | Configures sourcemap retrieval. | no +sourcemaps > location | [location][] | Configures on-disk location for sourcemap retrieval. | no +output | [output][] | Configures where to send collected telemetry data. | yes + +[server]: #server-block +[rate_limiting]: #rate_limiting-block +[sourcemaps]: #sourcemaps-block +[location]: #location-block +[output]: #output-block + +### server block + +The `server` block configures the HTTP server managed by the `faro.receiver` component. +Clients using the [Grafana Faro Web SDK][faro-sdk] forward telemetry data to this HTTP server for processing. + +Name | Type | Description | Default | Required +---------------------------|----------------|--------------------------------------------------------|-------------|--------- +`listen_address` | `string` | Address to listen for HTTP traffic on. | `127.0.0.1` | no +`listen_port` | `number` | Port to listen for HTTP traffic on. | `12347` | no +`cors_allowed_origins` | `list(string)` | Origins for which cross-origin requests are permitted. | `[]` | no +`api_key` | `secret` | Optional API key to validate client requests with. | `""` | no +`max_allowed_payload_size` | `string` | Maximum size (in bytes) for client requests. | `"5MiB"` | no + +By default, telemetry data is only accepted from applications on the same local network as the browser. +To accept telemetry data from a wider set of clients, modify the `listen_address` attribute to the IP address of the appropriate network interface to use. 
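+
+As a minimal sketch, a `server` block that accepts telemetry from remote browsers might look like the following; the `0.0.0.0` address is an illustrative assumption, not a required value:
+
+```river
+faro.receiver "example" {
+  server {
+    // Listen on all interfaces so remote browsers can reach this collector.
+    listen_address = "0.0.0.0"
+  }
+
+  output {
+    // Forward to loki or otelcol components in a real pipeline.
+    logs = []
+  }
+}
+```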
+ +The `cors_allowed_origins` argument determines what origins browser requests may come from. +The default value, `[]`, disables CORS support. +To support requests from all origins, set `cors_allowed_origins` to `["*"]`. +The `*` character indicates a wildcard. + +When the `api_key` argument is non-empty, client requests must have an HTTP header called `X-API-Key` matching the value of the `api_key` argument. +Requests that are missing the header or have the wrong value are rejected with an `HTTP 401 Unauthorized` status code. +If the `api_key` argument is empty, no authentication checks are performed, and the `X-API-Key` HTTP header is ignored. + +### rate_limiting block + +The `rate_limiting` block configures rate limiting for client requests. + +Name | Type | Description | Default | Required +-------------|----------|--------------------------------------|---------|--------- +`enabled` | `bool` | Whether to enable rate limiting. | `true` | no +`rate` | `number` | Rate of allowed requests per second. | `50` | no +`burst_size` | `number` | Allowed burst size of requests. | `100` | no + +Rate limiting functions as a [token bucket algorithm][token-bucket], where a bucket has a maximum capacity for up to `burst_size` requests and refills at a rate of `rate` per second. + +Each HTTP request drains the capacity of the bucket by one. Once the bucket is empty, HTTP requests are rejected with an `HTTP 429 Too Many Requests` status code until the bucket has more available capacity. + +Configuring the `rate` argument determines how fast the bucket refills, and configuring the `burst_size` argument determines how many requests can be received in a burst before the bucket is empty and starts rejecting requests. + +[token-bucket]: https://en.wikipedia.org/wiki/Token_bucket + +### sourcemaps block + +The `sourcemaps` block configures how to retrieve sourcemaps. +Sourcemaps are then used to transform file and line information from minified code into the file and line information from the original source code. + +Name | Type | Description | Default | Required +------------------------|----------------|--------------------------------------------|---------|--------- +`download` | `bool` | Whether to download sourcemaps. | `true` | no +`download_from_origins` | `list(string)` | Which origins to download sourcemaps from. | `["*"]` | no +`download_timeout` | `duration` | Timeout when downloading sourcemaps. | `"1s"` | no + +When exceptions are sent to the `faro.receiver` component, it can download sourcemaps from the web application. +You can disable this behavior by setting the `download` argument to `false`. + +The `download_from_origins` argument determines which origins a sourcemap may be downloaded from. +The origin is attached to the URL that a browser is sending telemetry data from. +The default value, `["*"]`, enables downloading sourcemaps from all origins. +The `*` character indicates a wildcard. + +By default, sourcemap downloads are subject to a timeout of `"1s"`, specified by the `download_timeout` argument. +Setting `download_timeout` to `"0s"` disables timeouts. + +To retrieve sourcemaps from disk instead of the network, specify one or more [`location` blocks][location]. +When `location` blocks are provided, they are checked first for sourcemaps before falling back to downloading. + +### location block + +The `location` block declares a location where sourcemaps are stored on the filesystem. 
+The `location` block can be specified multiple times to declare multiple locations where sourcemaps are stored.
+
+Name                   | Type     | Description                                         | Default | Required
+-----------------------|----------|-----------------------------------------------------|---------|---------
+`path`                 | `string` | The path on disk where sourcemaps are stored.       |         | yes
+`minified_path_prefix` | `string` | The prefix of the minified path sent from browsers. |         | yes
+
+The `minified_path_prefix` argument determines the prefix of paths to JavaScript files, such as `http://example.com/`.
+The `path` argument then determines where to find the sourcemap for the file.
+
+For example, given the following location block:
+
+```river
+location {
+    path                 = "/var/my-app/build"
+    minified_path_prefix = "http://example.com/"
+}
+```
+
+To look up the sourcemaps for a file hosted at `http://example.com/foo.js`, the `faro.receiver` component will:
+
+1. Remove the minified path prefix to extract the path to the file (`foo.js`).
+2. Search for that file path with a `.map` extension (`foo.js.map`) in `path` (`/var/my-app/build/foo.js.map`).
+
+Optionally, the value for the `path` argument may contain `{{ .Release }}` as a template value, such as `/var/my-app/{{ .Release }}/build`.
+The template value will be replaced with the release value provided by the [Faro Web SDK][faro-sdk].
+
+### output block
+
+The `output` block specifies where to forward collected logs and traces.
+
+Name     | Type                     | Description                                          | Default | Required
+---------|--------------------------|------------------------------------------------------|---------|---------
+`logs`   | `list(LogsReceiver)`     | A list of `loki` components to forward logs to.      | `[]`    | no
+`traces` | `list(otelcol.Consumer)` | A list of `otelcol` components to forward traces to. | `[]`    | no
+
+## Exported fields
+
+`faro.receiver` does not export any fields.
+
+## Component health
+
+`faro.receiver` is reported as unhealthy when the integrated server fails to start.
+
+## Debug information
+
+`faro.receiver` does not expose any component-specific debug information.
+
+## Debug metrics
+
+`faro.receiver` exposes the following metrics for monitoring the component:
+
+* `faro_receiver_logs_total` (counter): Total number of ingested logs.
+* `faro_receiver_measurements_total` (counter): Total number of ingested measurements.
+* `faro_receiver_exceptions_total` (counter): Total number of ingested exceptions.
+* `faro_receiver_events_total` (counter): Total number of ingested events.
+* `faro_receiver_exporter_errors_total` (counter): Total number of errors produced by an internal exporter.
+* `faro_receiver_request_duration_seconds` (histogram): Time (in seconds) spent serving HTTP requests.
+* `faro_receiver_request_message_bytes` (histogram): Size (in bytes) of HTTP requests received from clients.
+* `faro_receiver_response_message_bytes` (histogram): Size (in bytes) of HTTP responses sent to clients.
+* `faro_receiver_inflight_requests` (gauge): Current number of inflight requests.
+* `faro_receiver_sourcemap_cache_size` (counter): Number of items in sourcemap cache per origin.
+* `faro_receiver_sourcemap_downloads_total` (counter): Total number of sourcemap downloads performed per origin and status.
+* `faro_receiver_sourcemap_file_reads_total` (counter): Total number of sourcemap retrievals using the filesystem per origin and status.
+
+## Example
+
+```river
+faro.receiver "default" {
+    server {
+        listen_address = "NETWORK_ADDRESS"
+    }
+
+    sourcemaps {
+        location {
+            path                 = "PATH_TO_SOURCEMAPS"
+            minified_path_prefix = "WEB_APP_PREFIX"
+        }
+    }
+
+    output {
+        logs   = [loki.write.default.receiver]
+        traces = [otelcol.exporter.otlp.traces.input]
+    }
+}
+
+loki.write "default" {
+    endpoint {
+        url = "https://LOKI_ADDRESS/api/v1/push"
+    }
+}
+
+otelcol.exporter.otlp "traces" {
+    client {
+        endpoint = "OTLP_ADDRESS"
+    }
+}
+```
+
+Replace the following:
+
+* `NETWORK_ADDRESS`: IP address of the network interface to listen to traffic on.
+  This IP address must be reachable by browsers using the web application to instrument.
+
+* `PATH_TO_SOURCEMAPS`: Path on disk where sourcemaps are located.
+
+* `WEB_APP_PREFIX`: Prefix of the web application being instrumented.
+
+* `LOKI_ADDRESS`: Address of the Loki server to send logs to.
+
+  * If authentication is required to send logs to the Loki server, refer to the documentation of [loki.write][] for more information.
+
+* `OTLP_ADDRESS`: The address of the OTLP-compatible server to send traces to.
+
+  * If authentication is required to send traces to the OTLP-compatible server, refer to the documentation of [otelcol.exporter.otlp][] for more information.
+
+[loki.write]: ../loki.write/
+[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/
+
+
+
+## Compatible components
+
+`faro.receiver` can accept arguments from the following components:
+
+- Components that export [Loki `LogsReceiver`](../../compatibility/#loki-logsreceiver-exporters)
+- Components that export [OpenTelemetry `otelcol.Consumer`](../../compatibility/#opentelemetry-otelcolconsumer-exporters)
+
+
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
+
+
diff --git a/docs/sources/reference/components/local.file.md b/docs/sources/reference/components/local.file.md
new file mode 100644
index 0000000000..69628d9f01
--- /dev/null
+++ b/docs/sources/reference/components/local.file.md
@@ -0,0 +1,72 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/reference/components/local.file/
+description: Learn about local.file
+title: local.file
+---
+
+# local.file
+
+`local.file` exposes the contents of a file on disk to other components.
+The file is watched for changes so that its latest content is always exposed.
+
+The most common use of `local.file` is to load secrets (e.g., API keys) from files.
+
+Multiple `local.file` components can be specified by giving them different labels.
+
+## Usage
+
+```river
+local.file "LABEL" {
+  filename = FILE_NAME
+}
+```
+
+## Arguments
+
+The following arguments are supported:
+
+Name             | Type       | Description                                          | Default      | Required
+-----------------|------------|------------------------------------------------------|--------------|---------
+`filename`       | `string`   | Path of the file on disk to watch.                   |              | yes
+`detector`       | `string`   | Which file change detector to use (fsnotify, poll).  | `"fsnotify"` | no
+`poll_frequency` | `duration` | How often to poll for file changes.                  | `"1m"`       | no
+`is_secret`      | `bool`     | Marks the file as containing a [secret][].           | `false`      | no
+
+[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets
+
+{{< docs/shared lookup="reference/components/local-file-arguments-text.md" source="alloy" version="" >}}
+
+## Exported fields
+
+The following fields are exported and can be referenced by other components:
+
+Name      | Type                 | Description
+----------|----------------------|----------------------------------------------------
+`content` | `string` or `secret` | The contents of the file from the most recent read.
+
+The `content` field has the `secret` type only if the `is_secret` argument is `true`.
+
+## Component health
+
+`local.file` is reported as healthy when the watched file was read successfully.
+
+Failing to read the file whenever an update is detected (or after the poll period elapses) causes the component to be reported as unhealthy.
+When unhealthy, exported fields are kept at the last healthy value.
+The read error is exposed as a log message and in the debug information for the component.
+
+## Debug information
+
+`local.file` does not expose any component-specific debug information.
+
+## Debug metrics
+
+* `agent_local_file_timestamp_last_accessed_unix_seconds` (gauge): The timestamp, in Unix seconds, that the file was last successfully accessed.
+
+## Example
+
+```river
+local.file "secret_key" {
+  filename  = "/var/secrets/password.txt"
+  is_secret = true
+}
+```
diff --git a/docs/sources/flow/reference/components/local.file_match.md b/docs/sources/reference/components/local.file_match.md
similarity index 85%
rename from docs/sources/flow/reference/components/local.file_match.md
rename to docs/sources/reference/components/local.file_match.md
index 1413a1f8a2..5831311871
--- a/docs/sources/flow/reference/components/local.file_match.md
+++ b/docs/sources/reference/components/local.file_match.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/local.file_match/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/local.file_match/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/local.file_match/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/local.file_match/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/local.file_match/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/local.file_match/
description: Learn about local.file_match
title: local.file_match
---
@@ -42,8 +37,8 @@ Name | Type | Description
The following fields are exported and can be referenced by other components:

-Name | Type | Description
----- | ---- | -----------
+Name      | Type                | Description
+----------|---------------------|----------------------------------------------------
`targets` | `list(map(string))` | The set of targets discovered from the filesystem.
Each target includes the following labels: @@ -52,9 +47,8 @@ Each target includes the following labels: ## Component health -`local.file_match` is only reported as unhealthy when given an invalid -configuration. In those cases, exported fields retain their last healthy -values. +`local.file_match` is only reported as unhealthy when given an invalid configuration. +In those cases, exported fields retain their last healthy values. ## Debug information @@ -68,8 +62,8 @@ values. ### Send `/tmp/logs/*.log` files to Loki -This example discovers all files and folders under `/tmp/logs`. The absolute paths are -used by `loki.source.file.files` targets. +This example discovers all files and folders under `/tmp/logs`. +The absolute paths are used by `loki.source.file.files` targets. ```river local.file_match "tmp" { diff --git a/docs/sources/flow/reference/components/loki.echo.md b/docs/sources/reference/components/loki.echo.md similarity index 67% rename from docs/sources/flow/reference/components/loki.echo.md rename to docs/sources/reference/components/loki.echo.md index eb16448a86..675c2ef3ce 100644 --- a/docs/sources/flow/reference/components/loki.echo.md +++ b/docs/sources/reference/components/loki.echo.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.echo/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.echo/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.echo/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.echo/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.echo/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.echo/ description: Learn about loki.echo labels: stage: beta @@ -13,13 +8,11 @@ title: loki.echo # loki.echo -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} -`loki.echo` receives log entries from other `loki` components and prints them -to the process' standard output (stdout). +`loki.echo` receives log entries from other `loki` components and prints them to the process' standard output (stdout). -Multiple `loki.echo` components can be specified by giving them -different labels. +Multiple `loki.echo` components can be specified by giving them different labels. ## Usage @@ -35,8 +28,8 @@ loki.echo "LABEL" {} The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +-----------|----------------|-------------------------------------------------------------- `receiver` | `LogsReceiver` | A value that other components can use to send log entries to. 
## Component health @@ -49,8 +42,7 @@ Name | Type | Description ## Example -This example creates a pipeline that reads log files from `/var/log` and -prints log lines to echo: +This example creates a pipeline that reads log files from `/var/log` and prints log lines to echo: ```river local.file_match "varlog" { diff --git a/docs/sources/flow/reference/components/loki.process.md b/docs/sources/reference/components/loki.process.md similarity index 81% rename from docs/sources/flow/reference/components/loki.process.md rename to docs/sources/reference/components/loki.process.md index f30efb5767..93f3455d0b 100644 --- a/docs/sources/flow/reference/components/loki.process.md +++ b/docs/sources/reference/components/loki.process.md @@ -4,27 +4,20 @@ aliases: - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.process/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.process/ - /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.process/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.process/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.process/ description: Learn about loki.process title: loki.process --- # loki.process -`loki.process` receives log entries from other loki components, applies one or -more processing _stages_, and forwards the results to the list of receivers -in the component's arguments. +`loki.process` receives log entries from other loki components, applies one or more processing _stages_, and forwards the results to the list of receivers in the component's arguments. -A stage is a multi-purpose tool that can parse, transform, and filter log -entries before they're passed to a downstream component. These stages are -applied to each log entry in order of their appearance in the configuration -file. All stages within a `loki.process` block have access to the log entry's -label set, the log line, the log timestamp, as well as a shared map of -'extracted' values so that the results of one stage can be used in a subsequent -one. +A stage is a multi-purpose tool that can parse, transform, and filter log entries before they're passed to a downstream component. +These stages are applied to each log entry in order of their appearance in the configuration file. +All stages within a `loki.process` block have access to the log entry's label set, the log line, the log timestamp, as well as a shared map of 'extracted' values so that the results of one stage can be used in a subsequent one. -Multiple `loki.process` components can be specified by giving them -different labels. +Multiple `loki.process` components can be specified by giving them different labels. ## Usage @@ -79,9 +72,7 @@ The following blocks are supported inside the definition of `loki.process`: | stage.tenant | [stage.tenant][] | Configures a `tenant` processing stage. | no | | stage.timestamp | [stage.timestamp][] | Configures a `timestamp` processing stage. | no | -A user can provide any number of these stage blocks nested inside -`loki.process`; these will run in order of appearance in the configuration -file. +A user can provide any number of these stage blocks nested inside `loki.process`; these will run in order of appearance in the configuration file. [stage.cri]: #stagecri-block [stage.decolorize]: #stagedecolorize-block @@ -112,8 +103,7 @@ file. 
### stage.cri block -The `stage.cri` inner block enables a predefined pipeline which reads log lines using -the CRI logging format. +The `stage.cri` inner block enables a predefined pipeline which reads log lines using the CRI logging format. The following arguments are supported: @@ -123,23 +113,21 @@ The following arguments are supported: | `max_partial_line_size` | `number` | Maximum number of characters which a partial line can have. | `0` | no | | `max_partial_line_size_truncate` | `bool` | Truncate partial lines that are longer than `max_partial_line_size`. | `false` | no | -`max_partial_line_size` is only taken into account if -`max_partial_line_size_truncate` is set to `true`. +`max_partial_line_size` is only taken into account if `max_partial_line_size_truncate` is set to `true`. ```river stage.cri {} ``` -CRI specifies log lines as single space-delimited values with the following -components: +CRI specifies log lines as single space-delimited values with the following components: * `time`: The timestamp string of the log * `stream`: Either `stdout` or `stderr` * `flags`: CRI flags including `F` or `P` * `log`: The contents of the log line -Given the following log line, the subsequent key-value pairs are created in the -shared map of extracted data: +Given the following log line, the subsequent key-value pairs are created in the shared map of extracted data: + ``` "2019-04-30T02:12:41.8443515Z stdout F message" @@ -150,18 +138,15 @@ timestamp: 2019-04-30T02:12:41.8443515 ### stage.decolorize block -The `stage.decolorize` strips ANSI color codes from the log lines, thus making -it easier to parse logs further. +The `stage.decolorize` strips ANSI color codes from the log lines, thus making it easier to parse logs further. -The `stage.decolorize` block does not support any arguments or inner blocks, so -it is always empty. +The `stage.decolorize` block does not support any arguments or inner blocks, so it is always empty. ```river stage.decolorize {} ``` -`stage.decolorize` turns each line having a color code into a non-colored one, -for example: +`stage.decolorize` turns each line having a color code into a non-colored one, for example: ``` [2022-11-04 22:17:57.811] \033[0;32http\033[0m: GET /_health (0 ms) 204 @@ -175,11 +160,9 @@ is turned into ### stage.docker block -The `stage.docker` inner block enables a predefined pipeline which reads log lines in -the standard format of Docker log files. +The `stage.docker` inner block enables a predefined pipeline which reads log lines in the standard format of Docker log files. -The `stage.docker` block does not support any arguments or inner blocks, so it is -always empty. +The `stage.docker` block does not support any arguments or inner blocks, so it is always empty. ```river stage.docker {} @@ -191,8 +174,7 @@ Docker log entries are formatted as JSON with the following keys: * `stream`: Either `stdout` or `stderr` * `time`: The timestamp string of the log line -Given the following log line, the subsequent key-value pairs are created in the -shared map of extracted data: +Given the following log line, the subsequent key-value pairs are created in the shared map of extracted data: ``` {"log":"log message\n","stream":"stderr","time":"2019-04-30T02:12:41.8443515Z"} @@ -204,9 +186,8 @@ timestamp: 2019-04-30T02:12:41.8443515 ### stage.drop block -The `stage.drop` inner block configures a filtering stage that drops log entries -based on several options. 
If multiple options are provided, they're treated
-as AND clauses and must _all_ be true for the log entry to be dropped.
+The `stage.drop` inner block configures a filtering stage that drops log entries based on several options.
+If multiple options are provided, they're treated as AND clauses and must _all_ be true for the log entry to be dropped.
To drop entries with an OR clause, specify multiple `drop` blocks in sequence.

The following arguments are supported:

@@ -222,28 +203,17 @@ The following arguments are supported:
| `drop_counter_reason` | `string`   | A custom reason to report for dropped lines.                          | `"drop_stage"` | no       |

The `expression` field must be a RE2 regex string.
-* If `source` is empty or not provided, the regex attempts to match the log
-line itself.
-* If `source` is a single name, the regex attempts to match the corresponding
-value from the extracted map.
-* If `source` is a comma-separated list of names, the corresponding values from
-the extracted map are concatenated using `separator` and the regex attempts to
-match the concatenated string.
-
-The `value` field can only work with values from the extracted map, and must be
-specified together with `source`.
-* If `source` is a single name, the entries are dropped when there is an exact
-match between the corresponding value from the extracted map and the `value`.
-* If `source` is a comma-separated list of names, the entries are dropped when
-the `value` matches the `source` values from extracted data, concatenated using
-the `separator`.
-
-Whenever an entry is dropped, the metric `loki_process_dropped_lines_total`
-is incremented. By default, the reason label is `"drop_stage"`, but you can
-provide a custom label using the `drop_counter_reason` argument.
-
-The following stage drops log entries that contain the word `debug` _and_ are
-longer than 1KB.
+* If `source` is empty or not provided, the regex attempts to match the log line itself.
+* If `source` is a single name, the regex attempts to match the corresponding value from the extracted map.
+* If `source` is a comma-separated list of names, the corresponding values from the extracted map are concatenated using `separator` and the regex attempts to match the concatenated string.
+
+The `value` field can only work with values from the extracted map, and must be specified together with `source`.
+* If `source` is a single name, the entries are dropped when there is an exact match between the corresponding value from the extracted map and the `value`.
+* If `source` is a comma-separated list of names, the entries are dropped when the `value` matches the `source` values from extracted data, concatenated using the `separator`.
+
+Whenever an entry is dropped, the metric `loki_process_dropped_lines_total` is incremented. By default, the reason label is `"drop_stage"`, but you can provide a custom label using the `drop_counter_reason` argument.
+
+The following stage drops log entries that contain the word `debug` _and_ are longer than 1KB.

```river
stage.drop {
@@ -252,9 +222,7 @@ stage.drop {
}
```

-On the following example, we define multiple `drop` blocks so `loki.process`
-drops entries that are either 24h or older, are longer than 8KB, _or_ the
-extracted value of 'app' is equal to foo.
+In the following example, we define multiple `drop` blocks so `loki.process` drops entries that are either older than 24h, longer than 8KB, _or_ where the extracted value of 'app' is equal to foo.
```river stage.drop { @@ -275,8 +243,7 @@ stage.drop { ### stage.eventlogmessage block -The `eventlogmessage` stage extracts data from the Message string that appears -in the Windows Event Log. +The `eventlogmessage` stage extracts data from the Message string that appears in the Windows Event Log. The following arguments are supported: @@ -286,20 +253,18 @@ The following arguments are supported: | `overwrite_existing` | `bool` | Whether to overwrite existing extracted data fields. | `false` | no | | `drop_invalid_labels` | `bool` | Whether to drop fields that are not valid label names. | `false` | no | -When `overwrite_existing` is set to `true`, the stage overwrites existing extracted data -fields with the same name. If set to `false`, the `_extracted` suffix will be -appended to an already existing field name. +When `overwrite_existing` is set to `true`, the stage overwrites existing extracted data fields with the same name. +If set to `false`, the `_extracted` suffix will be appended to an already existing field name. -When `drop_invalid_labels` is set to `true`, the stage drops fields that are -not valid label names. If set to `false`, the stage will automatically convert -them into valid labels replacing invalid characters with underscores. +When `drop_invalid_labels` is set to `true`, the stage drops fields that are not valid label names. +If set to `false`, the stage will automatically convert them into valid labels replacing invalid characters with underscores. #### Example combined with `stage.json` ```river stage.json { - expressions = { - message = "", + expressions = { + message = "", Overwritten = "", } } @@ -315,14 +280,12 @@ Given the following log line: {"event_id": 1, "Overwritten": "old", "message": "Message type:\r\nOverwritten: new\r\nImage: C:\\Users\\User\\agent.exe"} ``` -The first stage would create the following key-value pairs in the set of -extracted data: +The first stage would create the following key-value pairs in the set of extracted data: - `message`: `Message type:\r\nOverwritten: new\r\nImage: C:\Users\User\agent.exe` - `Overwritten`: `old` -The second stage will parse the value of `message` from the extracted data -and append/overwrite the following key-value pairs to the set of extracted data: +The second stage will parse the value of `message` from the extracted data and append/overwrite the following key-value pairs to the set of extracted data: - `Image`: `C:\\Users\\User\\agent.exe` - `Message_type`: (empty string) @@ -330,10 +293,9 @@ and append/overwrite the following key-value pairs to the set of extracted data: ### stage.json block -The `stage.json` inner block configures a JSON processing stage that parses incoming -log lines or previously extracted values as JSON and uses -[JMESPath expressions](https://jmespath.org/tutorial.html) to extract new -values from them. +The `stage.json` inner block configures a JSON processing stage that parses incoming log lines or previously extracted values as JSON and uses [JMESPath expressions][] to extract new values from them. + +[JMESPath expressions]: https://jmespath.org/tutorial.html The following arguments are supported: @@ -343,13 +305,11 @@ The following arguments are supported: | `source` | `string` | Source of the data to parse as JSON. | `""` | no | | `drop_malformed` | `bool` | Drop lines whose input cannot be parsed as valid JSON. | `false` | no | -When configuring a JSON stage, the `source` field defines the source of data to -parse as JSON. 
By default, this is the log line itself, but it can also be a -previously extracted value. +When configuring a JSON stage, the `source` field defines the source of data to parse as JSON. +By default, this is the log line itself, but it can also be a previously extracted value. -The `expressions` field is the set of key-value pairs of JMESPath expressions to -run. The map key defines the name with which the data is extracted, while the -map value is the expression used to populate the value. +The `expressions` field is the set of key-value pairs of JMESPath expressions to run. +The map key defines the name with which the data is extracted, while the map value is the expression used to populate the value. Here's a given log line and two JSON stages to run. @@ -368,27 +328,22 @@ loki.process "username" { } ``` -In this example, the first stage uses the log line as the source and populates -these values in the shared map. An empty expression means using the same value -as the key (so `extra="extra"`). +In this example, the first stage uses the log line as the source and populates these values in the shared map. +An empty expression means using the same value as the key (so `extra="extra"`). ``` output: log message\n extra: {"user": "agent"} ``` -The second stage uses the value in `extra` as the input and appends the -following key-value pair to the set of extracted data. +The second stage uses the value in `extra` as the input and appends the following key-value pair to the set of extracted data. ``` username: agent ``` {{< admonition type="note" >}} -Due to a limitation of the upstream jmespath library, you must wrap any string -that contains a hyphen `-` in quotes so that it's not considered a numerical -expression. - -If you don't use quotes to wrap a string that contains a hyphen, you will get -errors like: `Unexpected token at the end of the expression: tNumber` +Due to a limitation of the upstream jmespath library, you must wrap any string that contains a hyphen `-` in quotes so that it's not considered a numerical expression. + +If you don't use quotes to wrap a string that contains a hyphen, you will get errors like: `Unexpected token at the end of the expression: tNumber` You can use one of two options to circumvent this issue: @@ -415,8 +370,7 @@ stage.label_drop { ### stage.label_keep block -The `stage.label_keep` inner block configures a processing stage that filters the -label set of an incoming log entry down to a subset. +The `stage.label_keep` inner block configures a processing stage that filters the label set of an incoming log entry down to a subset. The following arguments are supported: @@ -433,8 +387,7 @@ stage.label_keep { ### stage.labels block -The `stage.labels` inner block configures a labels processing stage that can read -data from the extracted values map and set new labels on incoming log entries. +The `stage.labels` inner block configures a labels processing stage that can read data from the extracted values map and set new labels on incoming log entries. The following arguments are supported: @@ -442,9 +395,8 @@ The following arguments are supported: | -------- | ------------- | --------------------------------------- | ------- | -------- | | `values` | `map(string)` | Configures a `labels` processing stage. | `{}` | no | -In a labels stage, the map's keys define the label to set and the values are -how to look them up. If the value is empty, it is inferred to be the same as -the key. 
+In a labels stage, the map's keys define the label to set and the values are how to look them up. +If the value is empty, it is inferred to be the same as the key. ```river stage.labels { @@ -457,8 +409,7 @@ stage.labels { ### stage.structured_metadata block -The `stage.structured_metadata` inner block configures a stage that can read -data from the extracted values map and add them to log entries as structured metadata. +The `stage.structured_metadata` inner block configures a stage that can read data from the extracted values map and add them to log entries as structured metadata. The following arguments are supported: @@ -466,9 +417,8 @@ The following arguments are supported: | -------- | ------------- |-----------------------------------------------------------------------------| ------- | -------- | | `values` | `map(string)` | Specifies the list of labels to add from extracted values map to log entry. | `{}` | no | -In a structured_metadata stage, the map's keys define the label to set and the values are -how to look them up. If the value is empty, it is inferred to be the same as -the key. +In a structured_metadata stage, the map's keys define the label to set and the values are how to look them up. +If the value is empty, it is inferred to be the same as the key. ```river stage.structured_metadata { @@ -481,8 +431,7 @@ stage.structured_metadata { ### stage.limit block -The `stage.limit` inner block configures a rate-limiting stage that throttles logs -based on several options. +The `stage.limit` inner block configures a rate-limiting stage that throttles logs based on several options. The following arguments are supported: @@ -494,10 +443,8 @@ The following arguments are supported: | `drop` | `bool` | Whether to discard or backpressure lines that exceed the rate limit. | `false` | no | | `max_distinct_labels` | `number` | The number of unique values to keep track of when rate-limiting `by_label_name`. | `10000` | no | -The rate limiting is implemented as a "token bucket" of size `burst`, initially -full and refilled at `rate` tokens per second. Each received log entry consumes one token from the bucket. When `drop` is set to true, incoming entries -that exceed the rate-limit are dropped, otherwise they are queued until -more tokens are available. +The rate limiting is implemented as a "token bucket" of size `burst`, initially full and refilled at `rate` tokens per second. +Each received log entry consumes one token from the bucket. When `drop` is set to true, incoming entries that exceed the rate-limit are dropped, otherwise they are queued until more tokens are available. ```river stage.limit { @@ -506,13 +453,13 @@ stage.limit { } ``` -If `by_label_name` is set, then `drop` must be set to `true`. This enables the -stage to rate-limit not by the number of lines but by the number of labels. +If `by_label_name` is set, then `drop` must be set to `true`. +This enables the stage to rate-limit not by the number of lines but by the number of labels. + +The following example rate-limits entries from each unique `namespace` value independently. +Any entries without the `namespace` label are not rate-limited. +The stage keeps track of up to `max_distinct_labels` unique values, defaulting at 10000. -The following example rate-limits entries from each unique `namespace` value -independently. Any entries without the `namespace` label are not rate-limited. -The stage keeps track of up to `max_distinct_labels` unique -values, defaulting at 10000. 
```river
stage.limit {
  rate = 10
  burst = 10
  by_label_name = "namespace"
}
@@ -525,8 +472,7 @@ stage.limit {

### stage.logfmt block

-The `stage.logfmt` inner block configures a processing stage that reads incoming log
-lines as logfmt and extracts values from them.
+The `stage.logfmt` inner block configures a processing stage that reads incoming log lines as logfmt and extracts values from them.

The following arguments are supported:

@@ -536,14 +482,12 @@ The following arguments are supported:
| `source` | `string` | Source of the data to parse as logfmt. | `""`    | no       |


-The `source` field defines the source of data to parse as logfmt. When `source`
-is missing or empty, the stage parses the log line itself, but it can also be
-used to parse a previously extracted value.
+The `source` field defines the source of data to parse as logfmt. When `source` is missing or empty, the stage parses the log line itself, but it can also be used to parse a previously extracted value.
+
+This stage uses the [go-logfmt][] unmarshaler, so that numeric or boolean types are unmarshalled into their correct form. The stage does not perform any other type conversions.
+If the extracted value is a complex type, it is treated as a string.

-This stage uses the [go-logfmt](https://github.com/go-logfmt/logfmt)
-unmarshaler, so that numeric or boolean types are unmarshalled into their
-correct form. The stage does not perform any other type conversions. If the
-extracted value is a complex type, it is treated as a string.
+[go-logfmt]: https://github.com/go-logfmt/logfmt

Let's see how this works on the following log line and stages.

@@ -560,17 +504,13 @@ stage.logfmt {
}
```

-The first stage parses the log line itself and inserts the `extra` key in the
-set of extracted data, with the value of `user=foo`.
+The first stage parses the log line itself and inserts the `extra` key in the set of extracted data, with the value of `user=foo`.

-The second stage parses the contents of `extra` and appends the `username: foo`
-key-value pair to the set of extracted data.
+The second stage parses the contents of `extra` and appends the `username: foo` key-value pair to the set of extracted data.

### stage.match block

-The `stage.match` inner block configures a filtering stage that can conditionally
-either apply a nested set of processing stages or drop an entry when a log
-entry matches a configurable LogQL stream selector and filter expressions.
+The `stage.match` inner block configures a filtering stage that can conditionally either apply a nested set of processing stages or drop an entry when a log entry matches a configurable LogQL stream selector and filter expressions.

The following arguments are supported:

@@ -585,18 +525,16 @@ The following arguments are supported:
The filters do not include label filter expressions such as `| label == "foobar"`.
{{< /admonition >}}

-The `stage.match` block supports a number of `stage.*` inner blocks, like the top-level
-block. These are used to construct the nested set of stages to run if the
-selector matches the labels and content of the log entries. It supports all the
-same `stage.NAME` blocks as the in the top level of the loki.process component.
+The `stage.match` block supports a number of `stage.*` inner blocks, like the top-level block.
+These are used to construct the nested set of stages to run if the selector matches the labels and content of the log entries.
+It supports all the same `stage.NAME` blocks as in the top level of the `loki.process` component.
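+
+As a minimal sketch, a `stage.match` block that runs a nested `stage.json` stage only on a matching stream might look like the following; the `app` label and `message` field are illustrative assumptions:
+
+```river
+stage.match {
+  // Only lines carrying the label app="frontend" enter the nested stages.
+  selector = "{app=\"frontend\"}"
+
+  // Nested stage, parsing the matching lines as JSON.
+  stage.json {
+    expressions = { msg = "message" }
+  }
+}
+```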
+
+If the specified action is `"drop"`, the metric `loki_process_dropped_lines_total` is incremented with every line dropped.
+By default, the reason label is `"match_stage"`, but a custom reason can be provided by using the `drop_counter_reason` argument.

-If the specified action is `"drop"`, the metric
-`loki_process_dropped_lines_total` is incremented with every line dropped.
-By default, the reason label is `"match_stage"`, but a custom reason can be
-provided by using the `drop_counter_reason` argument.
+Let's see this in action, with the following log lines and stages:

-Let's see this in action, with the following log lines and stages
```
{ "time":"2023-01-18T17:08:41+00:00", "app":"foo", "component": ["parser","type"], "level" : "WARN", "message" : "app1 log line" }
{ "time":"2023-01-18T17:08:42+00:00", "app":"bar", "component": ["parser","type"], "level" : "ERROR", "message" : "foo noisy error" }
@@ -636,35 +574,24 @@ stage.output {
}
```

-The first two stages parse the log lines as JSON, decode the `app` value into
-the shared extracted map as `appname`, and use its value as the `applbl` label.
+The first two stages parse the log lines as JSON, decode the `app` value into the shared extracted map as `appname`, and use its value as the `applbl` label.

-The third stage uses the LogQL selector to only execute the nested stages on
-lines where the `applbl="foo"`. So, for the first line, the nested JSON stage
-adds `msg="app1 log line"` into the extracted map.
+The third stage uses the LogQL selector to only execute the nested stages on lines where the `applbl="foo"`.
+So, for the first line, the nested JSON stage adds `msg="app1 log line"` into the extracted map.

-The fourth stage uses the LogQL selector to only execute on lines where
-`applbl="qux"`; that means it won't match any of the input, and the nested
-JSON stage does not run.
+The fourth stage uses the LogQL selector to only execute on lines where `applbl="qux"`; that means it won't match any of the input, and the nested JSON stage does not run.

-The fifth stage drops entries from lines where `applbl` is set to 'bar' and the
-line contents matches the regex `.*noisy error.*`. It also increments the
-`loki_process_dropped_lines_total` metric with a label
-`drop_counter_reason="discard_noisy_errors"`.
+The fifth stage drops entries from lines where `applbl` is set to 'bar' and the line content matches the regex `.*noisy error.*`.
+It also increments the `loki_process_dropped_lines_total` metric with a label `drop_counter_reason="discard_noisy_errors"`.

-The final output stage changes the contents of the log line to be the value of
-`msg` from the extracted map. In this case, the first log entry's content is
-changed to `app1 log line`.
+The final output stage changes the contents of the log line to be the value of `msg` from the extracted map. In this case, the first log entry's content is changed to `app1 log line`.

### stage.metrics block

-The `stage.metrics` inner block configures stage that allows to define and
-update metrics based on values from the shared extracted map. The created
-metrics are available at the Agent's root /metrics endpoint.
+The `stage.metrics` inner block configures a stage that allows you to define and update metrics based on values from the shared extracted map.
+The created metrics are available at {{< param "PRODUCT_NAME" >}}'s root /metrics endpoint.
-
-The `stage.metrics` block does not support any arguments and is only configured via
-a number of nested inner `metric.*` blocks, one for each metric that should be
-generated.
+The `stage.metrics` block does not support any arguments and is only configured via a number of nested inner `metric.*` blocks, one for each metric that should be generated.

The following blocks are supported inside the definition of `stage.metrics`:

@@ -680,28 +607,27 @@ The following blocks are supported inside the definition of `stage.metrics`:

#### metric.counter block

Defines a metric whose value only goes up.

The following arguments are supported:

-| Name                | Type       | Description                                                                                                | Default                  | Required |
-|---------------------|------------|------------------------------------------------------------------------------------------------------------|--------------------------|----------|
-| `name`              | `string`   | The metric name.                                                                                           |                          | yes      |
-| `action`            | `string`   | The action to take. Valid actions are `set`, `inc`, `dec`,` add`, or `sub`.                                |                          | yes      |
-| `description`       | `string`   | The metric's description and help text.                                                                    | `""`                     | no       |
-| `source`            | `string`   | Key from the extracted data map to use for the metric. Defaults to the metric name.                        | `""`                     | no       |
-| `prefix`            | `string`   | The prefix to the metric name.                                                                             | `"loki_process_custom_"` | no       |
-| `max_idle_duration` | `duration` | Maximum amount of time to wait until the metric is marked as 'stale' and removed.                          | `"5m"`                   | no       |
-| `value`             | `string`   | If set, the metric only changes if `source` exactly matches the `value`.                                   | `""`                     | no       |
-| `match_all`         | `bool`     | If set to true, all log lines are counted, without attemptng to match the `source` to the extracted map.   | `false`                  | no       |
-| `count_entry_bytes` | `bool`     | If set to true, counts all log lines bytes.                                                                | `false`                  | no       |
+| Name                | Type       | Description                                                                                                 | Default                  | Required |
+|---------------------|------------|-------------------------------------------------------------------------------------------------------------|--------------------------|----------|
+| `name`              | `string`   | The metric name.                                                                                            |                          | yes      |
+| `action`            | `string`   | The action to take. Valid actions are `inc` or `add`.                                                       |                          | yes      |
+| `description`       | `string`   | The metric's description and help text.                                                                     | `""`                     | no       |
+| `source`            | `string`   | Key from the extracted data map to use for the metric. Defaults to the metric name.                         | `""`                     | no       |
+| `prefix`            | `string`   | The prefix to the metric name.                                                                              | `"loki_process_custom_"` | no       |
+| `max_idle_duration` | `duration` | Maximum amount of time to wait until the metric is marked as 'stale' and removed.                           | `"5m"`                   | no       |
+| `value`             | `string`   | If set, the metric only changes if `source` exactly matches the `value`.                                    | `""`                     | no       |
+| `match_all`         | `bool`     | If set to true, all log lines are counted, without attempting to match the `source` to the extracted map.   | `false`                  | no       |
+| `count_entry_bytes` | `bool`     | If set to true, counts the bytes of all log lines.                                                          | `false`                  | no       |

A counter cannot set both `match_all` to true _and_ a `value`.
-A counter cannot set `count_entry_bytes` without also setting `match_all=true`
-_or_ `action=add`.
-The valid `action` values are `inc` and `add`. The `inc` action increases the
-metric value by 1 for each log line that passed the filter. The `add` action
-converts the extracted value to a positive float and adds it to the metric.
+A counter cannot set `count_entry_bytes` without also setting `match_all=true` _or_ `action=add`.
+The valid `action` values are `inc` and `add`.
The `inc` action increases the metric value by 1 for each log line that passed the filter.
+The `add` action converts the extracted value to a positive float and adds it to the metric.

#### metric.gauge block

@@ -722,8 +648,7 @@ The following arguments are supported:

The valid `action` values are `inc`, `dec`, `set`, `add`, or `sub`.

`inc` and `dec` increment and decrement the metric's value by 1 respectively.
-If `set`, `add, or `sub` is chosen, the extracted value must be convertible
-to a positive float and is set, added to, or subtracted from the metric's value.
+If `set`, `add`, or `sub` is chosen, the extracted value must be convertible to a positive float and is set, added to, or subtracted from the metric's value.

#### metric.histogram block

@@ -746,14 +671,12 @@ The following arguments are supported:

If `value` is not present, all incoming log entries match.

-Label values on created metrics can be dynamic, which can cause exported
-metrics to explode in cardinality or go stale, for example, when a stream stops
-receiving new logs. To prevent unbounded growth of the `/metrics` endpoint, any
-metrics which have not been updated within `max_idle_duration` are removed. The
-`max_idle_duration` must be greater or equal to `"1s"`, and it defaults to `"5m"`.
+Label values on created metrics can be dynamic, which can cause exported metrics to explode in cardinality or go stale, for example, when a stream stops receiving new logs.
+To prevent unbounded growth of the `/metrics` endpoint, any metrics which have not been updated within `max_idle_duration` are removed.
+The `max_idle_duration` must be greater than or equal to `"1s"`, and it defaults to `"5m"`.

-The metric values extracted from the log data are internally converted to
-floats. The supported values are the following:
+The metric values extracted from the log data are internally converted to floats.
+The supported values are the following:

* integer
* floating point number
@@ -764,9 +687,12 @@ floats. The supported values are the following:
* true is converted to 1.
* false is converted to 0.

-The following pipeline creates a counter which increments every time any log line is received by using the `match_all` parameter. The pipeline creates a second counter which adds the byte size of these log lines by using the `count_entry_bytes` parameter.
+The following pipeline creates a counter which increments every time any log line is received by using the `match_all` parameter.
+The pipeline creates a second counter which adds the byte size of these log lines by using the `count_entry_bytes` parameter.
+
+These two metrics disappear after 24 hours if no new entries are received, to avoid building up metrics which no longer serve any use.
+These two metrics are a good starting point to track the volume of log streams in both the number of entries and their byte size, to identify sources of high-volume or high-cardinality data.

-These two metrics disappear after 24 hours if no new entries are received, to avoid building up metrics which no longer serve any use. These two metrics are a good starting point to track the volume of log streams in both the number of entries and their byte size, to identify sources of high-volume or high-cardinality data.
```river
stage.metrics {
    metric.counter {
@@ -793,8 +719,7 @@ stage.metrics {
}
```

-Here, the first stage uses a regex to extract text in the format
-`order_status=` in the log line.
+Here, the first stage uses a regex to extract text in the format `order_status=` in the log line.
The second stage defines a counter which increments `successful_orders_total` and `failed_orders_total` based on the previously extracted values.

```river
stage.regex {
@@ -821,7 +746,8 @@ stage.metrics {
}
```

-In this example, the first stage extracts text in the format of `retries=`, from the log line. The second stage creates a gauge whose current metric value is increased by the number extracted from the retries field.
+In this example, the first stage extracts text in the format of `retries=` from the log line.
+The second stage creates a gauge whose current metric value is increased by the number extracted from the retries field.

```river
stage.regex {
@@ -837,9 +763,7 @@ stage.regex {
}
```

-The following example shows a histogram that reads `response_time` from the extracted
-map and places it into a bucket, both increasing the count of the bucket and
-the sum for that particular bucket:
+The following example shows a histogram that reads `response_time` from the extracted map and places it into a bucket, both increasing the count of the bucket and the sum for that particular bucket:

```river
stage.metrics {
@@ -854,8 +778,7 @@ stage.metrics {
}
```

### stage.multiline block

-The `stage.multiline` inner block merges multiple lines into a single block before
-passing it on to the next stage in the pipeline.
+The `stage.multiline` inner block merges multiple lines into a single block before passing it on to the next stage in the pipeline.

The following arguments are supported:

@@ -869,13 +792,12 @@ The following arguments are supported:

A new block is identified by the RE2 regular expression passed in `firstline`.

-Any line that does _not_ match the expression is considered to be part of the
-block of the previous match. If no new logs arrive with `max_wait_time`, the
-block is sent on. The `max_lines` field defines the maximum number of lines a
-block can have. If this is exceeded, a new block is started.
+Any line that does _not_ match the expression is considered to be part of the block of the previous match.
+If no new logs arrive within `max_wait_time`, the block is sent on.
+The `max_lines` field defines the maximum number of lines a block can have.
+If this is exceeded, a new block is started.

-Let's see how this works in practice with an example stage and a stream of log
-entries from a Flask web service.
+Let's see how this works in practice with an example stage and a stream of log entries from a Flask web service.

```
stage.multiline {
@@ -905,16 +827,12 @@ Exception: Sorry, this route always breaks
[2023-01-18 17:42:29] "GET /hello HTTP/1.1" 200 -
```

-All 'blocks' that form log entries of separate web requests start with a
-timestamp in square brackets. The stage detects this with the regular
-expression in `firstline` to collapse all lines of the traceback into a single
-block and thus a single Loki log entry.
+All 'blocks' that form log entries of separate web requests start with a timestamp in square brackets.
+The stage detects this with the regular expression in `firstline` to collapse all lines of the traceback into a single block and thus a single Loki log entry.

### stage.output block

-The `stage.output` inner block configures a processing stage that reads from the
-extracted map and changes the content of the log entry that is forwarded
-to the next component.
+The `stage.output` inner block configures a processing stage that reads from the extracted map and changes the content of the log entry that is forwarded to the next component.
The following arguments are supported: @@ -942,19 +860,17 @@ stage.output { ``` The first stage extracts the following key-value pairs into the shared map: + ``` user: John Doe message: hello, world! ``` -Then, the second stage adds `user="John Doe"` to the label set of the log -entry, and the final output stage changes the log line from the original -JSON to `hello, world!`. +Then, the second stage adds `user="John Doe"` to the label set of the log entry, and the final output stage changes the log line from the original JSON to `hello, world!`. ### stage.pack block -The `stage.pack` inner block configures a transforming stage that replaces the log -entry with a JSON object that embeds extracted values and labels with it. +The `stage.pack` inner block configures a transforming stage that replaces the log entry with a JSON object that embeds extracted values and labels with it. The following arguments are supported: @@ -963,16 +879,14 @@ The following arguments are supported: | `labels` | `list(string)` | The values from the extracted data and labels to pack with the log entry. | | yes | | `ingest_timestamp` | `bool` | Whether to replace the log entry timestamp with the time the `pack` stage runs. | `true` | no | -This stage lets you embed extracted values and labels together with the log -line, by packing them into a JSON object. The original message is stored under -the `_entry` key, and all other keys retain their values. This is useful in -cases where you _do_ want to keep a certain label or metadata, but you don't -want it to be indexed as a label due to high cardinality. +This stage lets you embed extracted values and labels together with the log line, by packing them into a JSON object. +The original message is stored under the `_entry` key, and all other keys retain their values. +This is useful in cases where you _do_ want to keep a certain label or metadata, but you don't want it to be indexed as a label due to high cardinality. -The querying capabilities of Loki make it easy to still access this data so it can -be filtered and aggregated at query time. +The querying capabilities of Loki make it easy to still access this data so it can be filtered and aggregated at query time. For example, consider the following log entry: + ``` log_line: "something went wrong" labels: { "level" = "error", "env" = "dev", "user_id" = "f8fas0r" } @@ -985,8 +899,8 @@ stage.pack { } ``` -The stage transforms the log entry into the following JSON object, where the two -embedded labels are removed from the original log entry: +The stage transforms the log entry into the following JSON object, where the two embedded labels are removed from the original log entry: + ```json { "_entry": "something went wrong", @@ -995,19 +909,15 @@ embedded labels are removed from the original log entry: } ``` -At query time, Loki's [`unpack` parser](/docs/loki/latest/logql/log_queries/#unpack) -can be used to access these embedded labels and replace the log line with the -original one stored in the `_entry` field automatically. +At query time, Loki's [`unpack` parser][unpack parser] can be used to access these embedded labels and replace the log line with the original one stored in the `_entry` field automatically. + +[unpack parser]: https://grafana.com/docs/loki/latest/logql/log_queries/#unpack -When combining several log streams to use with the `pack` stage, you can set -`ingest_timestamp` to true to avoid interlaced timestamps and -out-of-order ingestion issues. 
+When combining several log streams to use with the `pack` stage, you can set `ingest_timestamp` to true to avoid interlaced timestamps and out-of-order ingestion issues.

### stage.regex block

-The `stage.regex` inner block configures a processing stage that parses log lines
-using regular expressions and uses named capture groups for adding data into
-the shared extracted map of values.
+The `stage.regex` inner block configures a processing stage that parses log lines using regular expressions and uses named capture groups for adding data into the shared extracted map of values.

The following arguments are supported:

@@ -1017,19 +927,16 @@ The following arguments are supported:
| `source` | `string` | Name from extracted data to parse. If empty, uses the log message. | `""` | no |


-The `expression` field needs to be a RE2 regex string. Every matched capture
-group is added to the extracted map, so it must be named like: `(?P<name>re)`.
-The name of the capture group is then used as the key in the extracted map for
-the matched value.
+The `expression` field needs to be a RE2 regex string.
+Every matched capture group is added to the extracted map, so it must be named like: `(?P<name>re)`.
+The name of the capture group is then used as the key in the extracted map for the matched value.

-Because of how River strings work, any backslashes in `expression` must be
-escaped with a double backslash; for example `"\\w"` or `"\\S+"`.
+Because of how River strings work, any backslashes in `expression` must be escaped with a double backslash; for example `"\\w"` or `"\\S+"`.

If the `source` is empty or missing, then the stage parses the log line itself.
If it's set, the stage parses a previously extracted value with the same name.

-Given the following log line and regex stage, the extracted values are shown
-below:
+Given the following log line and regex stage, the extracted values are shown below:

```
2019-01-01T01:00:00.000000001Z stderr P i'm a log message!
@@ -1044,11 +951,10 @@ flags: P,
content: i'm a log message
```

-On the other hand, if the `source` value is set, then the regex is applied to
-the value stored in the shared map under that name.
+On the other hand, if the `source` value is set, then the regex is applied to the value stored in the shared map under that name.
+
+Let's see what happens when the following log line is put through this two-stage pipeline:

-Let's see what happens when the following log line is put through this
-two-stage pipeline:

```
{"timestamp":"2022-01-01T01:00:00.000000001Z"}
@@ -1062,21 +968,21 @@ stage.regex {
```

The first stage adds the following key-value pair into the extracted map:
+
```
time: 2022-01-01T01:00:00.000000001Z
```

-Then, the regex stage parses the value for time from the shared values and
-appends the subsequent key-value pair back into the extracted values map:
+Then, the regex stage parses the value for time from the shared values and appends the subsequent key-value pair back into the extracted values map:
+
```
year: 2022
```

### stage.replace block

-The `stage.replace` inner block configures a stage that parses a log line using a
-regular expression and replaces the log line contents. Named capture groups in
-the regex also support adding data into the shared extracted map.
+The `stage.replace` inner block configures a stage that parses a log line using a regular expression and replaces the log line contents.
+Named capture groups in the regex also support adding data into the shared extracted map.
The following arguments are supported:

@@ -1087,19 +993,16 @@ The following arguments are supported:
| `replace` | `string` | Value replaced by the capture group. | | no |

-The `source` field defines the source of data to parse using `expression`. When
-`source` is missing or empty, the stage parses the log line itself, but it can
-also be used to parse a previously extracted value. The replaced value is
-assigned back to the `source` key.
+The `source` field defines the source of data to parse using `expression`.
+When `source` is missing or empty, the stage parses the log line itself, but it can also be used to parse a previously extracted value.
+The replaced value is assigned back to the `source` key.

-The `expression` must be a valid RE2 regex. Every named capture group
-`(?P<name>re)` is set into the extracted map with its name.
+The `expression` must be a valid RE2 regex.
+Every named capture group `(?P<name>re)` is set into the extracted map with its name.

-Because of how River treats backslashes in double-quoted strings, note that all
-backslashes in a regex expression must be escaped like `"\\w*"`.
+Because of how River treats backslashes in double-quoted strings, all backslashes in a regex expression must be escaped like `"\\w*"`.

-Let's see how this works with the following log line and stage. Since `source`
-is omitted, the replacement occurs on the log line itself.
+Let's see how this works with the following log line and stage. Since `source` is omitted, the replacement occurs on the log line itself.

```
2023-01-01T01:00:00.000000001Z stderr P i'm a log message who has sensitive information with password xyz!
@@ -1111,6 +1014,7 @@ stage.replace {
```

The log line is transformed to
+
```
2023-01-01T01:00:00.000000001Z stderr P i'm a log message who has sensitive information with password *****!
```
@@ -1118,6 +1022,7 @@ The log line is transformed to

If `replace` is empty, then the captured value is omitted instead.

In the following example, `source` is defined.
+
```
{"time":"2023-01-01T01:00:00.000000001Z", "level": "info", "msg":"11.11.11.11 - \"POST /loki/api/push/ HTTP/1.1\" 200 932 \"-\" \"Mozilla/5.0\""}
@@ -1133,25 +1038,27 @@ stage.replace {
```

The JSON stage adds the following key-value pairs into the extracted map:
+
```
time: 2023-01-01T01:00:00.000000001Z
level: info
msg: "11.11.11.11 - "POST /loki/api/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0"
```

-The `replace` stage acts on the `msg` value. The capture group matches against
-`/loki/api/push` and is replaced by `redacted_url`.
+The `replace` stage acts on the `msg` value. The capture group matches against `/loki/api/push` and is replaced by `redacted_url`.

The `msg` value is finally transformed into:
+
```
msg: "11.11.11.11 - "POST redacted_url HTTP/1.1" 200 932 "-" "Mozilla/5.0"
```

-The `replace` field can use a set of templating functions, by utilizing Go's
-[text/template](https://pkg.go.dev/text/template) package.
+The `replace` field can use a set of templating functions, by utilizing Go's [text/template][] package.
+
+[text/template]: https://pkg.go.dev/text/template
+
+Let's see how this works with named capture groups with a sample log line and stage.

-Let's see how this works with named capture groups with a sample log line
-and stage.

```
11.11.11.11 - agent [01/Jan/2023:00:00:01 +0200]
@@ -1161,9 +1068,9 @@ stage.replace {
}
```

-Since `source` is empty, the regex parses the log line itself and extracts the
-named capture groups to the shared map of values. 
The `replace` field acts on
-these extracted values and converts them to uppercase:
+Since `source` is empty, the regex parses the log line itself and extracts the named capture groups to the shared map of values.
+The `replace` field acts on these extracted values and converts them to uppercase:
+
```
ip: 11.11.11.11
identd: -
@@ -1172,12 +1079,13 @@ timestamp: 01/JAN/2023:00:00:01 +0200
```

and the log line becomes:
+
```
11.11.11.11 - FRANK [01/JAN/2023:00:00:01 +0200]
```

-The following list contains available functions with examples of
-more complex `replace` fields.
+The following list contains available functions with examples of more complex `replace` fields.
+
```
ToLower, ToUpper, Replace, Trim, TrimLeft, TrimRight, TrimPrefix, TrimSuffix,
TrimSpace, Hash, Sha2Hash, regexReplaceAll, regexReplaceAllLiteral
@@ -1187,9 +1095,8 @@ ToLower, ToUpper, Replace, Trim, TrimLeftTrimRight, TrimPrefix, TrimSuffix, Trim

### stage.sampling block

-The `sampling` stage is used to sample the logs. Configuring the value
-`rate = 0.1` means that 10% of the logs will continue to be processed. The
-remaining 90% of the logs will be dropped.
+The `sampling` stage is used to sample the logs. Configuring the value `rate = 0.1` means that 10% of the logs will continue to be processed.
+The remaining 90% of the logs will be dropped.

The following arguments are supported:

@@ -1198,9 +1105,8 @@ The following arguments are supported:
| `rate` | `float` | The sampling rate in a range of `[0, 1]`. | | yes |
| `drop_counter_reason` | `string` | The label to add to `loki_process_dropped_lines_total` metric when logs are dropped by this stage. | sampling_stage | no |

-For example, the configuration below will sample 25% of the logs and drop the
-remaining 75%. When logs are dropped, the `loki_process_dropped_lines_total`
-metric is incremented with an additional `reason=logs_sampling` label.
+For example, the configuration below will sample 25% of the logs and drop the remaining 75%.
+When logs are dropped, the `loki_process_dropped_lines_total` metric is incremented with an additional `reason=logs_sampling` label.

```river
stage.sampling {
@@ -1211,8 +1117,7 @@ stage.sampling {

### stage.static_labels block

-The `stage.static_labels` inner block configures a static_labels processing stage
-that adds a static set of labels to incoming log entries.
+The `stage.static_labels` inner block configures a static_labels processing stage that adds a static set of labels to incoming log entries.

The following arguments are supported:

@@ -1232,13 +1137,11 @@ stage.static_labels {

### stage.template block

-The `stage.template` inner block configures a transforming stage that allows users to
-manipulate the values in the extracted map by using Go's `text/template`
-[package](https://pkg.go.dev/text/template) syntax. This stage is primarily
-useful for manipulating and standardizing data from previous stages before
-setting them as labels in a subsequent stage. Example use cases are replacing
-spaces with underscores, converting uppercase strings to lowercase, or hashing
-a value.
+The `stage.template` inner block configures a transforming stage that allows users to manipulate the values in the extracted map by using Go's `text/template` [package][] syntax.
+This stage is primarily useful for manipulating and standardizing data from previous stages before setting them as labels in a subsequent stage.
+Example use cases are replacing spaces with underscores, converting uppercase strings to lowercase, or hashing a value.
+
+[package]: https://pkg.go.dev/text/template

The template stage can also create new keys in the extracted map.

@@ -1249,18 +1152,21 @@ The following arguments are supported:
| `source` | `string` | Name from extracted data to parse. If the key doesn't exist, a new entry is created. | | yes |
| `template` | `string` | Go template string to use. | | yes |

-The template string can be any valid template that can be used by Go's `text/template`. It supports all functions from the [sprig package](http://masterminds.github.io/sprig/), as well as the following list of custom functions:
+The template string can be any valid template that can be used by Go's `text/template`.
+It supports all functions from the [sprig package][], as well as the following list of custom functions:
+
+[sprig package]: http://masterminds.github.io/sprig/
+
```
ToLower, ToUpper, Replace, Trim, TrimLeft, TrimRight, TrimPrefix, TrimSuffix,
TrimSpace, Hash, Sha2Hash, regexReplaceAll, regexReplaceAllLiteral
```

-More details on each of these functions can be found in the [supported
-functions][] section below.
+More details on each of these functions can be found in the [supported functions][] section below.

[supported functions]: #supported-functions

-Assuming no data is present on the extracted map, the following stage simply
-adds the `new_key: "hello_world"`key-value pair to the shared map.
+Assuming no data is present on the extracted map, the following stage simply adds the `new_key: "hello_world"` key-value pair to the shared map.
+
```river
stage.template {
  source = "new_key"
@@ -1269,8 +1175,8 @@ stage.template {
```

If the `source` value exists in the extracted fields, its value can be referred to as `.Value` in the template.
-The next stage takes the current value of `app` from the extracted map,
-converts it to lowercase, and adds a suffix to its value:
+The next stage takes the current value of `app` from the extracted map, converts it to lowercase, and adds a suffix to its value:
+
```river
stage.template {
  source = "app"
@@ -1279,8 +1185,8 @@ stage.template {
```

Any previously extracted keys are available for `template` to expand and use.
-The next stage takes the current values for `level`, `app` and `module` and
-creates a new key named `output_message`:
+The next stage takes the current values for `level`, `app` and `module` and creates a new key named `output_message`:
+
```river
stage.template {
  source = "output_msg"
@@ -1288,8 +1194,8 @@ stage.template {
}
```

-A special key named `Entry` can be used to reference the current line; this can
-be useful when you need to append/prepend something to the log line, like this snippet:
+A special key named `Entry` can be used to reference the current line; this can be useful when you need to append/prepend something to the log line, like this snippet:
+
```river
stage.template {
  source = "message"
@@ -1304,6 +1210,7 @@ stage.output {

In addition to supporting all functions from the [sprig package](http://masterminds.github.io/sprig/), the `template` stage supports the following custom functions.

##### ToLower and ToUpper
+
`ToLower` and `ToUpper` convert the entire string to lowercase and uppercase,
respectively.

@@ -1320,6 +1227,7 @@ stage.template {
```

##### Replace
+
The `Replace` function syntax is defined as `{{ Replace <input> <old> <new> <n> }}`.
The function returns a copy of the input string, with instances of the `<old>`
@@ -1337,6 +1245,7 @@ stage.template {
```

##### Trim, TrimLeft, TrimRight, TrimSpace, TrimPrefix, TrimSuffix
+
* `Trim` returns a slice of the string `s` with all leading and trailing Unicode
code points contained in `cutset` removed.
* `TrimLeft` and `TrimRight` are the same as Trim except that they
@@ -1344,7 +1253,9 @@ stage.template {
* `TrimSpace` returns a slice of the string s, with all leading and trailing white
space removed, as defined by Unicode.
* `TrimPrefix` and `TrimSuffix` trim the supplied prefix or suffix, respectively.
+
Examples:
+
```river
stage.template {
  source = "output"
@@ -1361,6 +1272,7 @@ stage.template {
```

##### Regex
+
`regexReplaceAll` returns a copy of the input string, replacing matches of the Regexp
with the replacement string. Inside the replacement string, `$` characters are
interpreted as in Expand functions, so for instance, $1 represents the first captured
@@ -1382,7 +1294,9 @@ stage.template {
```

##### Hash and Sha2Hash
+
+`Hash` returns a `Sha3_256` hash of the string, represented as a hexadecimal number of 64 digits. You can use it to obfuscate sensitive data and PII in the logs.
+It requires a (fixed) salt value, to add complexity to low input domains (e.g., all possible social security numbers).
-`Hash` returns a `Sha3_256` hash of the string, represented as a hexadecimal number of 64 digits. You can use it to obfuscate sensitive data and PII in the logs. It requires a (fixed) salt value, to add complexity to low input domains (e.g., all possible social security numbers).

`Sha2Hash` returns a `Sha2_256` hash of the string, which is faster and less CPU-intensive than `Hash`; however, it is less secure.

Examples:
@@ -1423,6 +1337,7 @@ stage.tenant {

This stage extracts the tenant ID from the `customer_id` field after parsing the
log entry as JSON in the shared extracted map:
+
```river
stage.json {
  expressions = { "customer_id" = "" }
@@ -1433,6 +1348,7 @@ stage.tenant {
```

The final example extracts the tenant ID from a label set by a previous stage:
+
```river
stage.labels {
  "namespace" = "k8s_namespace"
@@ -1469,8 +1385,8 @@ the stage should attempt to parse as a timestamp.

The `format` field defines _how_ that source should be parsed.

-First off, the `format` can be set to one of the following shorthand values for
-commonly-used forms:
+First off, the `format` can be set to one of the following shorthand values for commonly-used forms:
+
```
ANSIC: Mon Jan _2 15:04:05 2006
UnixDate: Mon Jan _2 15:04:05 MST 2006
@@ -1486,6 +1402,7 @@ RFC3339Nano: 2006-01-02T15:04:05.999999999-07:00

Additionally, common Unix timestamps are supported with the following format values:
+
```
Unix: 1562708916 or with fractions 1562708916.000000123
UnixMs: 1562708916414
@@ -1710,7 +1627,7 @@ loki.process "example" {
  }
}
```
-The `json` stage extracts the IP address from the `client_ip` key in the log line. 
+The `json` stage extracts the IP address from the `client_ip` key in the log line.
Then the extracted `ip` value is given as the source to the geoip stage. The geoip stage performs a lookup on the IP and populates the shared map with the data from the city database results in addition to the custom lookups. Lastly, the custom lookup fields from the shared map are added as labels.

## Exported fields

@@ -1730,6 +1647,7 @@ The following fields are exported and can be referenced by other components:

`loki.process` does not expose any component-specific debug information.
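To make the relationship between these stages and the rest of a pipeline concrete, here is a minimal sketch of how the exported `receiver` is typically wired between a source and a writer. The file path and the Loki endpoint URL are placeholder assumptions, not values from the reference above.

```river
loki.source.file "example" {
  // Placeholder target; any target list with a __path__ key works here.
  targets    = [{"__path__" = "/var/log/app.log"}]
  forward_to = [loki.process.local.receiver]
}

loki.process "local" {
  forward_to = [loki.write.default.receiver]

  // A single illustrative stage; any of the stages documented above can be chained here.
  stage.static_labels {
    values = {
      env = "dev",
    }
  }
}

loki.write "default" {
  endpoint {
    // Placeholder URL; point this at your Loki instance.
    url = "http://loki:3100/loki/api/v1/push"
  }
}
```

Log entries flow into `loki.process.local.receiver`, pass through each `stage` in order, and are forwarded to every receiver listed in `forward_to`.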
## Debug metrics + * `loki_process_dropped_lines_total` (counter): Number of lines dropped as part of a processing stage. * `loki_process_dropped_lines_by_label_total` (counter): Number of lines dropped when `by_label_name` is non-empty in [stage.limit][]. diff --git a/docs/sources/flow/reference/components/loki.relabel.md b/docs/sources/reference/components/loki.relabel.md similarity index 70% rename from docs/sources/flow/reference/components/loki.relabel.md rename to docs/sources/reference/components/loki.relabel.md index 04f548da51..389372467e 100644 --- a/docs/sources/flow/reference/components/loki.relabel.md +++ b/docs/sources/reference/components/loki.relabel.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.relabel/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.relabel/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.relabel/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.relabel/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.relabel/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.relabel/ description: Learn about loki.relabel title: loki.relabel --- @@ -27,10 +22,9 @@ calling the function in the `rules` export field. If you're looking for a way to process the log entry contents, take a look at [the `loki.process` component][loki.process] instead. -[loki.process]: {{< relref "./loki.process.md" >}} +[loki.process]: ../loki.process/ -Multiple `loki.relabel` components can be specified by giving them -different labels. +Multiple `loki.relabel` components can be specified by giving them different labels. ## Usage @@ -50,32 +44,32 @@ loki.relabel "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`forward_to` | `list(receiver)` | Where to forward log entries after relabeling. | | yes -`max_cache_size` | `int` | The maximum number of elements to hold in the relabeling cache | 10,000 | no +Name | Type | Description | Default | Required +-----------------|------------------|----------------------------------------------------------------|---------|--------- +`forward_to` | `list(receiver)` | Where to forward log entries after relabeling. | | yes +`max_cache_size` | `int` | The maximum number of elements to hold in the relabeling cache | 10,000 | no ## Blocks The following blocks are supported inside the definition of `loki.relabel`: -Hierarchy | Name | Description | Required ---------- | ---- | ----------- | -------- -rule | [rule][] | Relabeling rules to apply to received log entries. | no +Hierarchy | Name | Description | Required +----------|----------|----------------------------------------------------|--------- +rule | [rule][] | Relabeling rules to apply to received log entries. | no [rule]: #rule-block ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block-logs.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block-logs.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- -`receiver` | `receiver` | The input receiver where log lines are sent to be relabeled. 
+Name | Type | Description +-----------|----------------|------------------------------------------------------------- +`receiver` | `receiver` | The input receiver where log lines are sent to be relabeled. `rules` | `RelabelRules` | The currently configured relabeling rules. ## Component health @@ -97,8 +91,7 @@ In those cases, exported fields are kept at their last healthy values. ## Example -The following example creates a `loki.relabel` component that only forwards -entries whose 'level' value is set to 'error'. +The following example creates a `loki.relabel` component that only forwards entries whose 'level' value is set to 'error'. ```river loki.relabel "keep_error_only" { diff --git a/docs/sources/flow/reference/components/loki.rules.kubernetes.md b/docs/sources/reference/components/loki.rules.kubernetes.md similarity index 77% rename from docs/sources/flow/reference/components/loki.rules.kubernetes.md rename to docs/sources/reference/components/loki.rules.kubernetes.md index ffb932df24..3b3f6a28b8 100644 --- a/docs/sources/flow/reference/components/loki.rules.kubernetes.md +++ b/docs/sources/reference/components/loki.rules.kubernetes.md @@ -6,7 +6,7 @@ labels: # loki.rules.kubernetes -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `loki.rules.kubernetes` discovers `PrometheusRule` Kubernetes resources and loads them into a Loki instance. @@ -22,7 +22,7 @@ loads them into a Loki instance. This component requires [Role-based access control (RBAC)][] to be set up in Kubernetes for {{< param "PRODUCT_ROOT_NAME" >}} to access it via the Kubernetes REST API. -Role-based access control (RBAC)]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ +[Role-based access control (RBAC)]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ {{< /admonition >}} [Kubernetes label selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors @@ -41,18 +41,18 @@ loki.rules.kubernetes "LABEL" { `loki.rules.kubernetes` supports the following arguments: -Name | Type | Description | Default | Required --------------------------|------------|----------------------------------------------------------|---------|--------- -`address` | `string` | URL of the Loki ruler. | | yes -`tenant_id` | `string` | Loki tenant ID. | | no -`use_legacy_routes` | `bool` | Whether to use deprecated ruler API endpoints. | false | no -`sync_interval` | `duration` | Amount of time between reconciliations with Loki. | "30s" | no +Name | Type | Description | Default | Required +------------------------|------------|--------------------------------------------------------------------------------------|---------|--------- +`address` | `string` | URL of the Loki ruler. | | yes +`tenant_id` | `string` | Loki tenant ID. | | no +`use_legacy_routes` | `bool` | Whether to use deprecated ruler API endpoints. | false | no +`sync_interval` | `duration` | Amount of time between reconciliations with Loki. | "30s" | no `loki_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_ROOT_NAME" >}} deployments. | "agent" | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`proxy_url` | `string` | HTTP proxy to proxy requests through. 
| | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -109,9 +109,9 @@ The `label_selector` block describes a Kubernetes label selector for rule or nam The following arguments are supported: -Name | Type | Description | Default | Required ----------------|---------------|---------------------------------------------------|-----------------------------|--------- -`match_labels` | `map(string)` | Label keys and values used to discover resources. | `{}` | yes +Name | Type | Description | Default | Required +---------------|---------------|---------------------------------------------------|---------|--------- +`match_labels` | `map(string)` | Label keys and values used to discover resources. | `{}` | yes When the `match_labels` argument is empty, all resources will be matched. @@ -121,11 +121,11 @@ The `match_expression` block describes a Kubernetes label match expression for r The following arguments are supported: -Name | Type | Description | Default | Required ------------|----------------|----------------------------------------------------|---------|--------- -`key` | `string` | The label name to match against. | | yes -`operator` | `string` | The operator to use when matching. | | yes -`values` | `list(string)` | The values used when matching. | | no +Name | Type | Description | Default | Required +-----------|----------------|------------------------------------|---------|--------- +`key` | `string` | The label name to match against. | | yes +`operator` | `string` | The operator to use when matching. | | yes +`values` | `list(string)` | The values used when matching. | | no The `operator` argument should be one of the following strings: @@ -135,19 +135,19 @@ The `operator` argument should be one of the following strings: ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -176,8 +176,8 @@ actually exist. 
## Debug metrics

-Metric Name | Type | Description
-----------------------------------------------|-------------|-------------------------------------------------------------------------
+Metric Name | Type | Description
+---------------------------------------------|-------------|-------------------------------------------------------------------------
`loki_rules_config_updates_total` | `counter` | Number of times the configuration has been updated.
`loki_rules_events_total` | `counter` | Number of events processed, partitioned by event type.
`loki_rules_events_failed_total` | `counter` | Number of events that failed to be processed, partitioned by event type.
diff --git a/docs/sources/flow/reference/components/loki.source.api.md b/docs/sources/reference/components/loki.source.api.md
similarity index 83%
rename from docs/sources/flow/reference/components/loki.source.api.md
rename to docs/sources/reference/components/loki.source.api.md
index cc508ad976..186a265514 100644
--- a/docs/sources/flow/reference/components/loki.source.api.md
+++ b/docs/sources/reference/components/loki.source.api.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.api/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.api/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.api/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.api/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.api/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.api/
description: Learn about loki.source.api
title: loki.source.api
---
@@ -13,9 +8,10 @@ title: loki.source.api

`loki.source.api` receives log entries over HTTP and forwards them to other `loki.*` components.

-The HTTP API exposed is compatible with [Loki push API][loki-push-api] and the `logproto` format. This means that other [`loki.write`][loki.write] components can be used as a client and send requests to `loki.source.api` which enables using the Agent as a proxy for logs.
+The HTTP API exposed is compatible with [Loki push API][loki-push-api] and the `logproto` format.
+This means that other [`loki.write`][loki.write] components can be used as clients and send requests to `loki.source.api`, which enables using {{< param "PRODUCT_NAME" >}} as a proxy for logs.

-[loki.write]: {{< relref "./loki.write.md" >}}
+[loki.write]: ../loki.write/
[loki-push-api]: https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki

## Usage

@@ -39,7 +35,7 @@ The component will start HTTP server on the configured port and address with the

- `/api/v1/raw` - internally reroutes to `/loki/api/v1/raw`

-[promtail-push-api]: /docs/loki/latest/clients/promtail/configuration/#loki_push_api
+[promtail-push-api]: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#loki_push_api

## Arguments

@@ -55,7 +51,7 @@ Name | Type | Description

The `relabel_rules` field can make use of the `rules` export value from a
[`loki.relabel`][loki.relabel] component to apply one or more relabeling rules to log entries before
they're forwarded to the list of receivers in `forward_to`.
-[loki.relabel]: {{< relref "./loki.relabel.md" >}}
+[loki.relabel]: ../loki.relabel/

## Blocks

@@ -69,7 +65,7 @@ Hierarchy | Name | Description | Requ

### http

-{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/loki-server-http.md" source="alloy" version="" >}}

## Exported fields

diff --git a/docs/sources/flow/reference/components/loki.source.awsfirehose.md b/docs/sources/reference/components/loki.source.awsfirehose.md
similarity index 63%
rename from docs/sources/flow/reference/components/loki.source.awsfirehose.md
rename to docs/sources/reference/components/loki.source.awsfirehose.md
index 2d43d6f82b..3b25e9e2c1 100644
--- a/docs/sources/flow/reference/components/loki.source.awsfirehose.md
+++ b/docs/sources/reference/components/loki.source.awsfirehose.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.awsfirehose/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.awsfirehose/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.awsfirehose/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.awsfirehose/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.awsfirehose/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.awsfirehose/
description: Learn about loki.source.awsfirehose
title: loki.source.awsfirehose
---
@@ -36,21 +31,21 @@ the raw records to Loki. The decoding process goes as follows:
The component exposes some internal labels, available for relabeling.
The following table describes the internal labels available in records coming from any source.

-| Name | Description | Example |
-|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------|
-| `__aws_firehose_request_id` | Firehose request ID. | `a1af4300-6c09-4916-ba8f-12f336176246` |
-| `__aws_firehose_source_arn` | Firehose delivery stream ARN. | `arn:aws:firehose:us-east-2:123:deliverystream/aws_firehose_test_stream` |
+| Name | Description | Example |
+|-----------------------------|-------------------------------|--------------------------------------------------------------------------|
+| `__aws_firehose_request_id` | Firehose request ID. | `a1af4300-6c09-4916-ba8f-12f336176246` |
+| `__aws_firehose_source_arn` | Firehose delivery stream ARN. | `arn:aws:firehose:us-east-2:123:deliverystream/aws_firehose_test_stream` |

If the source of the Firehose record is CloudWatch logs, the request is further
decoded and enriched with even more labels, exposed as follows:

-| Name | Description | Example |
-|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------|
-| `__aws_owner` | The AWS Account ID of the originating log data. | `111111111111` |
-| `__aws_cw_log_group` | The log group name of the originating log data. | `CloudTrail/logs` |
-| `__aws_cw_log_stream` | The log stream name of the originating log data. 
| `111111111111_CloudTrail/logs_us-east-1` | -| `__aws_cw_matched_filters` | The list of subscription filter names that match the originating log data. The list is encoded as a comma-separated list. | `Destination,Destination2` | -| `__aws_cw_msg_type` | Data messages will use the `DATA_MESSAGE` type. Sometimes CloudWatch Logs may emit Kinesis Data Streams records with a `CONTROL_MESSAGE` type, mainly for checking if the destination is reachable. | `DATA_MESSAGE` | +| Name | Description | Example | +|----------------------------|---------------------------------------------------------------------------------------------------------------------------|------------------------------------------| +| `__aws_owner` | The AWS Account ID of the originating log data. | `111111111111` | +| `__aws_cw_log_group` | The log group name of the originating log data. | `CloudTrail/logs` | +| `__aws_cw_log_stream` | The log stream name of the originating log data. | `111111111111_CloudTrail/logs_us-east-1` | +| `__aws_cw_matched_filters` | The list of subscription filter names that match the originating log data. The list is encoded as a comma-separated list. | `Destination,Destination2` | +| `__aws_cw_msg_type` | Data messages will use the `DATA_MESSAGE` type. Sometimes CloudWatch Logs may emit Kinesis Data Streams records with a `CONTROL_MESSAGE` type, mainly for checking if the destination is reachable. | `DATA_MESSAGE` | See [Examples](#example) for a full example configuration showing how to enrich each log entry with these labels. @@ -68,8 +63,7 @@ loki.source.awsfirehose "LABEL" { The component will start an HTTP server on the configured port and address with the following endpoints: -- `/awsfirehose/api/v1/push` - accepting `POST` requests compatible - with [AWS Firehose HTTP Specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html). +- `/awsfirehose/api/v1/push` - accepting `POST` requests compatible with [AWS Firehose HTTP Specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html). ## Arguments @@ -86,28 +80,27 @@ The `relabel_rules` field can make use of the `rules` export value from a [`loki.relabel`][loki.relabel] component to apply one or more relabeling rules to log entries before they're forwarded to the list of receivers in `forward_to`. -[loki.relabel]: {{< relref "./loki.relabel.md" >}} +[loki.relabel]: ../loki.relabel/ ## Blocks The following blocks are supported inside the definition of `loki.source.awsfirehose`: | Hierarchy | Name | Description | Required | - |-----------|----------|----------------------------------------------------|----------| +|-----------|----------|----------------------------------------------------|----------| | `http` | [http][] | Configures the HTTP server that receives requests. | no | | `grpc` | [grpc][] | Configures the gRPC server that receives requests. 
| no

[http]: #http
-
[grpc]: #grpc

### http

-{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/loki-server-http.md" source="alloy" version="" >}}

### grpc

-{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/loki-server-grpc.md" source="alloy" version="" >}}

## Exported fields

@@ -119,7 +112,8 @@ The following blocks are supported inside the definition of `loki.source.awsfire

## Debug metrics

-The following are some of the metrics that are exposed when this component is used. 
+The following are some of the metrics that are exposed when this component is used.
+
{{< admonition type="note" >}}
The metrics include labels such as `status_code` where relevant, which you can use to measure request success rates.
{{< /admonition >}}

diff --git a/docs/sources/reference/components/loki.source.azure_event_hubs.md b/docs/sources/reference/components/loki.source.azure_event_hubs.md
new file mode 100644
index 0000000000..667ebba912
--- /dev/null
+++ b/docs/sources/reference/components/loki.source.azure_event_hubs.md
@@ -0,0 +1,149 @@
+---
+
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.azure_event_hubs/
+description: Learn about loki.source.azure_event_hubs
+title: loki.source.azure_event_hubs
+---
+
+# loki.source.azure_event_hubs
+
+`loki.source.azure_event_hubs` receives Azure Event Hubs messages by making use of an Apache Kafka
+endpoint on Event Hubs. For more information, see
+the [Azure Event Hubs documentation](https://learn.microsoft.com/en-us/azure/event-hubs/azure-event-hubs-kafka-overview).
+
+To learn more about streaming Azure logs to an Azure event hub, refer to
+Microsoft's tutorial on how to [Stream Azure Active Directory logs to an Azure event hub](https://learn.microsoft.com/en-us/azure/active-directory/reports-monitoring/tutorial-azure-monitor-stream-logs-to-event-hub).
+
+Note that an Apache Kafka endpoint is not available within the Basic pricing plan. For more information, see
+the [Event Hubs pricing page](https://azure.microsoft.com/en-us/pricing/details/event-hubs/).
+
+Multiple `loki.source.azure_event_hubs` components can be specified by giving them
+different labels.
+
+## Usage
+
+```river
+loki.source.azure_event_hubs "LABEL" {
+    fully_qualified_namespace = "HOST:PORT"
+    event_hubs                = EVENT_HUB_LIST
+    forward_to                = RECEIVER_LIST
+
+    authentication {
+        mechanism = "AUTHENTICATION_MECHANISM"
+    }
+}
+```
+
+## Arguments
+
+`loki.source.azure_event_hubs` supports the following arguments:
+
+Name | Type | Description | Default | Required
+----------------------------|----------------------|--------------------------------------------------------------------|----------------------------------|---------
+`fully_qualified_namespace` | `string` | Event hub namespace. | | yes
+`event_hubs` | `list(string)` | Event Hubs to consume. | | yes
+`group_id` | `string` | The Kafka consumer group id. | `"loki.source.azure_event_hubs"` | no
+`assignor` | `string` | The consumer group rebalancing strategy to use. | `"range"` | no
+`use_incoming_timestamp` | `bool` | Whether or not to use the timestamp received from Azure Event Hub. | `false` | no
+`labels` | `map(string)` | The labels to associate with each received event. | `{}` | no
+`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. 
| | yes
+`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no
+`disallow_custom_messages` | `bool` | Whether to ignore messages that don't match the [schema](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema) for Azure resource logs. | `false` | no
+
+The `fully_qualified_namespace` argument must refer to a full `HOST:PORT` that points to your event hub, such as `NAMESPACE.servicebus.windows.net:9093`.
+The `assignor` argument must be set to one of `"range"`, `"roundrobin"`, or `"sticky"`.
+
+The `relabel_rules` field can make use of the `rules` export value from a
+`loki.relabel` component to apply one or more relabeling rules to log entries
+before they're forwarded to the list of receivers in `forward_to`.
+
+### Labels
+
+The `labels` map is applied to every message that the component reads.
+
+The following internal labels prefixed with `__` are available but are discarded if not relabeled:
+
+- `__meta_kafka_message_key`
+- `__meta_kafka_topic`
+- `__meta_kafka_partition`
+- `__meta_kafka_member_id`
+- `__meta_kafka_group_id`
+- `__azure_event_hubs_category`
+
+## Blocks
+
+The following blocks are supported inside the definition of `loki.source.azure_event_hubs`:
+
+Hierarchy | Name | Description | Required
+---------------|------------------|----------------------------------------------------|---------
+authentication | [authentication] | Authentication configuration with Azure Event Hub. | yes
+
+[authentication]: #authentication-block
+
+### authentication block
+
+The `authentication` block defines the authentication method when communicating with Azure Event Hub.
+
+Name | Type | Description | Default | Required
+--------------------|----------------|---------------------------------------------------------------------------|---------|---------
+`mechanism` | `string` | Authentication mechanism. | | yes
+`connection_string` | `string` | Event Hubs ConnectionString for authentication on Azure Cloud. | | no
+`scopes` | `list(string)` | Access token scopes. Default is `fully_qualified_namespace` without port. | | no
+
+`mechanism` supports the values `"connection_string"` and `"oauth"`. If `"connection_string"` is used,
+you must set the `connection_string` attribute. If `"oauth"` is used, you must configure one of the
+[supported credential types](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/README.md#credential-types)
+via environment variables or the Azure CLI.
+
+## Exported fields
+
+`loki.source.azure_event_hubs` does not export any fields.
+
+## Component health
+
+`loki.source.azure_event_hubs` is only reported as unhealthy if given an invalid
+configuration.
+
+## Debug information
+
+`loki.source.azure_event_hubs` does not expose additional debug info.
+
+## Example
+
+This example consumes messages from Azure Event Hubs and uses OAuth to authenticate itself. 
+
+```river
+loki.source.azure_event_hubs "example" {
+    fully_qualified_namespace = "my-ns.servicebus.windows.net:9093"
+    event_hubs                = ["gw-logs"]
+    forward_to                = [loki.write.example.receiver]
+
+    authentication {
+        mechanism = "oauth"
+    }
+}
+
+loki.write "example" {
+    endpoint {
+        url = "http://loki:3100/loki/api/v1/push"
+    }
+}
+```
+
+
+
+## Compatible components
+
+`loki.source.azure_event_hubs` can accept arguments from the following components:
+
+- Components that export [Loki `LogsReceiver`](../../compatibility/#loki-logsreceiver-exporters)
+
+
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
+
+

diff --git a/docs/sources/flow/reference/components/loki.source.cloudflare.md b/docs/sources/reference/components/loki.source.cloudflare.md
similarity index 82%
rename from docs/sources/flow/reference/components/loki.source.cloudflare.md
rename to docs/sources/reference/components/loki.source.cloudflare.md
index dbbd2e57b1..ab556885f6 100644
--- a/docs/sources/flow/reference/components/loki.source.cloudflare.md
+++ b/docs/sources/reference/components/loki.source.cloudflare.md
@@ -1,10 +1,6 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.cloudflare/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.cloudflare/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.cloudflare/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.cloudflare/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.cloudflare/
+
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.cloudflare/
description: Learn about loki.source.cloudflare
title: loki.source.cloudflare
---
@@ -36,16 +32,16 @@ loki.source.cloudflare "LABEL" {

`loki.source.cloudflare` supports the following arguments:

-Name | Type | Description | Default | Required
---------------- | -------------------- | -------------------- | ------- | --------
-`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
-`api_token` | `string` | The API token to authenticate with. | | yes
-`zone_id` | `string` | The Cloudflare zone ID to use. | | yes
-`labels` | `map(string)` | The labels to associate with incoming log entries. | `{}` | no
-`workers` | `int` | The number of workers to use for parsing logs. | `3` | no
-`pull_range` | `duration` | The timeframe to fetch for each pull request. | `"1m"` | no
-`fields_type` | `string` | The set of fields to fetch for log entries. | `"default"` | no
-`additional_fields` | `list(string)` | The additional list of fields to supplement those provided via `fields_type`. | | no
+Name | Type | Description | Default | Required
+--------------------|----------------------|---------------------------------------------------------------------------------|-------------|---------
+`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
+`api_token` | `string` | The API token to authenticate with. | | yes
+`zone_id` | `string` | The Cloudflare zone ID to use. | | yes
+`labels` | `map(string)` | The labels to associate with incoming log entries. | `{}` | no
+`workers` | `int` | The number of workers to use for parsing logs. 
| `3` | no
+`pull_range` | `duration` | The timeframe to fetch for each pull request. | `"1m"` | no
+`fields_type` | `string` | The set of fields to fetch for log entries. | `"default"` | no
+`additional_fields` | `list(string)` | The additional list of fields to supplement those provided via `fields_type`. | | no


By default `loki.source.cloudflare` fetches logs with the `default` set of
@@ -74,6 +70,7 @@ plus any extra fields provided via `additional_fields` argument.
```
"BotScore", "BotScoreSrc", "BotTags", "ClientRequestBytes", "ClientSrcPort", "ClientXRequestedWith", "CacheTieredFill", "EdgeResponseCompressionRatio", "EdgeServerIP", "FirewallMatchesSources", "FirewallMatchesActions", "FirewallMatchesRuleIDs", "OriginResponseBytes", "OriginResponseTime", "ClientDeviceType", "WAFFlags", "WAFMatchedVar", "EdgeColoID", "RequestHeaders", "ResponseHeaders", "ClientRequestSource"
```
+
plus any extra fields provided via the `additional_fields` argument (this remains relevant if new fields are made available via the Cloudflare API but are not yet included in `all`).
* `custom` includes only the fields defined in `additional_fields`.

@@ -95,6 +92,7 @@ The last timestamp fetched by the component is recorded in the

All incoming Cloudflare log entries are in JSON format. You can make use of the
`loki.process` component and a JSON processing stage to extract more labels or
change the log line format. A sample log looks like this:
+
```json
{
  "CacheCacheStatus": "miss",
@@ -165,7 +163,6 @@ change the log line format. A sample log looks like this:
}
```

-
## Exported fields

`loki.source.cloudflare` does not export any fields.

@@ -181,8 +178,7 @@ configuration.

* Whether the target is ready and reading logs from the API.
* The Cloudflare zone ID.
* The last error reported, if any.
-* The stored positions file entry, as the combination of zone_id, labels and
-  last fetched timestamp.
+* The stored positions file entry, as the combination of zone_id, labels and last fetched timestamp.
* The last timestamp fetched.
* The set of fields being fetched.

diff --git a/docs/sources/flow/reference/components/loki.source.docker.md b/docs/sources/reference/components/loki.source.docker.md
similarity index 55%
rename from docs/sources/flow/reference/components/loki.source.docker.md
rename to docs/sources/reference/components/loki.source.docker.md
index 09b88a7436..c8f16e2d90 100644
--- a/docs/sources/flow/reference/components/loki.source.docker.md
+++ b/docs/sources/reference/components/loki.source.docker.md
@@ -1,23 +1,14 @@
---
-aliases:
-- /docs/agent/latest/flow/reference/components/loki.source.docker/
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.docker/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.docker/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.docker/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.docker/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.docker/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.docker/
description: Learn about loki.source.docker
title: loki.source.docker
---

# loki.source.docker

-`loki.source.docker` reads log entries from Docker containers and forwards them
-to other `loki.*` components. Each component can read from a single Docker
-daemon. 
+`loki.source.docker` reads log entries from Docker containers and forwards them to other `loki.*` components. Each component can read from a single Docker daemon. -Multiple `loki.source.docker` components can be specified by giving them -different labels. +Multiple `loki.source.docker` components can be specified by giving them different labels. ## Usage @@ -30,32 +21,31 @@ loki.source.docker "LABEL" { ``` ## Arguments -The component starts a new reader for each of the given `targets` and fans out -log entries to the list of receivers passed in `forward_to`. +The component starts a new reader for each of the given `targets` and fans out log entries to the list of receivers passed in `forward_to`. `loki.source.docker` supports the following arguments: -Name | Type | Description | Default | Required ---------------- | -------------------- | -------------------- | ------- | -------- -`host` | `string` | Address of the Docker daemon. | | yes -`targets` | `list(map(string))` | List of containers to read logs from. | | yes -`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes -`labels` | `map(string)` | The default set of labels to apply on entries. | `"{}"` | no -`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `"{}"` | no -`refresh_interval` | `duration` | The refresh interval to use when connecting to the Docker daemon over HTTP(S). | `"60s"` | no +Name | Type | Description | Default | Required +-------------------|----------------------|--------------------------------------------------------------------------------|---------|--------- +`host` | `string` | Address of the Docker daemon. | | yes +`targets` | `list(map(string))` | List of containers to read logs from. | | yes +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes +`labels` | `map(string)` | The default set of labels to apply on entries. | `"{}"` | no +`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `"{}"` | no +`refresh_interval` | `duration` | The refresh interval to use when connecting to the Docker daemon over HTTP(S). | `"60s"` | no ## Blocks The following blocks are supported inside the definition of `loki.source.docker`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | HTTP client settings when connecting to the endpoint. | no -client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no -client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +-----------------------------|-------------------|----------------------------------------------------------|--------- +client | [client][] | HTTP client settings when connecting to the endpoint. | no +client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no +client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. 
| no +client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to an `basic_auth` block defined inside a `client` block. @@ -71,38 +61,33 @@ or HTTPS and has no effect when connecting via a `unix:///` socket ### client block -The `client` block configures settings used to connect to HTTP(S) Docker -daemons. +The `client` block configures settings used to connect to HTTP(S) Docker daemons. -{{< docs/shared lookup="flow/reference/components/http-client-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-config-block.md" source="alloy" version="" >}} ### basic_auth block -The `basic_auth` block configures basic authentication for HTTP(S) Docker -daemons. +The `basic_auth` block configures basic authentication for HTTP(S) Docker daemons. -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -The `authorization` block configures custom authorization to use for the Docker -daemon. +The `authorization` block configures custom authorization to use for the Docker daemon. -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -The `oauth2` block configures OAuth2 authorization to use for the Docker -daemon. +The `oauth2` block configures OAuth2 authorization to use for the Docker daemon. -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -The `tls_config` block configures TLS settings for connecting to HTTPS Docker -daemons. +The `tls_config` block configures TLS settings for connecting to HTTPS Docker daemons. -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -135,7 +120,7 @@ If the target's argument contains multiple entries with the same container ID (for example as a result of `discovery.docker` picking up multiple exposed ports or networks), `loki.source.docker` will deduplicate them, and only keep the first of each container ID instances, based on the -`__meta_docker_container_id` label. As such, the Docker daemon is queried +`__meta_docker_container_id` label. As such, the Docker daemon is queried for each container ID only once, and only one target will be available in the component's debug info. 
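+The `client` and `tls_config` blocks above configure connections to remote daemons. A minimal sketch of connecting to a remote Docker daemon over TLS follows; the daemon address and certificate paths are hypothetical, and a `discovery.docker.remote` component and a `loki.write.local` component are assumed to be defined elsewhere:
+
+```river
+// Tail container logs from a remote Docker daemon over HTTPS.
+loki.source.docker "remote" {
+  host       = "tcp://docker-host.example.com:2376"
+  targets    = discovery.docker.remote.targets
+  forward_to = [loki.write.local.receiver]
+
+  client {
+    tls_config {
+      ca_file   = "/etc/docker/certs/ca.pem"
+      cert_file = "/etc/docker/certs/cert.pem"
+      key_file  = "/etc/docker/certs/key.pem"
+    }
+  }
+}
+```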
@@ -151,7 +136,7 @@ discovery.docker "linux" { loki.source.docker "default" { host = "unix:///var/run/docker.sock" - targets = discovery.docker.linux.targets + targets = discovery.docker.linux.targets forward_to = [loki.write.local.receiver] } diff --git a/docs/sources/flow/reference/components/loki.source.file.md b/docs/sources/reference/components/loki.source.file.md similarity index 87% rename from docs/sources/flow/reference/components/loki.source.file.md rename to docs/sources/reference/components/loki.source.file.md index 683b66cabf..aba7803e26 100644 --- a/docs/sources/flow/reference/components/loki.source.file.md +++ b/docs/sources/reference/components/loki.source.file.md @@ -1,24 +1,19 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.file/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.file/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.file/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.file/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.file/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.file/ description: Learn about loki.source.file title: loki.source.file --- # loki.source.file -`loki.source.file` reads log entries from files and forwards them to other -`loki.*` components. +`loki.source.file` reads log entries from files and forwards them to other `loki.*` components. Multiple `loki.source.file` components can be specified by giving them different labels. {{< admonition type="note" >}} -`loki.source.file` does not handle file discovery. You can use `local.file_match` for file discovery. Refer to the [File Globbing](#file-globbing) example for more information. +`loki.source.file` does not handle file discovery. You can use `local.file_match` for file discovery. +Refer to the [File Globbing](#file-globbing) example for more information. {{< /admonition >}} ## Usage @@ -32,8 +27,7 @@ loki.source.file "LABEL" { ## Arguments -The component starts a new reader for each of the given `targets` and fans out -log entries to the list of receivers passed in `forward_to`. +The component starts a new reader for each of the given `targets` and fans out log entries to the list of receivers passed in `forward_to`. `loki.source.file` supports the following arguments: @@ -54,10 +48,10 @@ When set to true, only new logs will be read, ignoring the existing ones. The following blocks are supported inside the definition of `loki.source.file`: -| Hierarchy | Name | Description | Required | -| -------------- | ------------------ | ----------------------------------------------------------------- | -------- | -| decompression | [decompression][] | Configure reading logs from compressed files. | no | -| file_watch | [file_watch][] | Configure how often files should be polled from disk for changes. | no | +| Hierarchy | Name | Description | Required | +|---------------|-------------------|-------------------------------------------------------------------|----------| +| decompression | [decompression][] | Configure reading logs from compressed files. | no | +| file_watch | [file_watch][] | Configure how often files should be polled from disk for changes. | no | [decompression]: #decompression-block [file_watch]: #file_watch-block @@ -130,8 +124,7 @@ configuration. 
If the decompression feature is deactivated, the component will continuously monitor and 'tail' the files. In this mode, upon reaching the end of a file, the component remains active, awaiting and reading new entries in real-time as they are appended.
-Each element in the list of `targets` as a set of key-value pairs called
-_labels_.
+Each element in the list of `targets` is a set of key-value pairs called _labels_.
The set of targets can either be _static_, or dynamically provided periodically by a service discovery component. The special label `__path__` _must always_ be present and must point to the absolute path of the file to read from.
@@ -154,7 +147,7 @@ If a file is removed from the `targets` list, its positions file entry is also
removed. When it's added back on, `loki.source.file` starts reading it from
the beginning.

-[cmd-args]: {{< relref "../cli/run.md" >}}
+[cmd-args]: ../../cli/run/

## Examples

diff --git a/docs/sources/flow/reference/components/loki.source.gcplog.md b/docs/sources/reference/components/loki.source.gcplog.md
similarity index 84%
rename from docs/sources/flow/reference/components/loki.source.gcplog.md
rename to docs/sources/reference/components/loki.source.gcplog.md
index d57cf28cc0..77c7ebb8c3 100644
--- a/docs/sources/flow/reference/components/loki.source.gcplog.md
+++ b/docs/sources/reference/components/loki.source.gcplog.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.gcplog/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.gcplog/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.gcplog/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.gcplog/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.gcplog/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.gcplog/
description: Learn about loki.source.gcplog
title: loki.source.gcplog
---
@@ -18,8 +13,7 @@ load balancers, or Kubernetes clusters running on GCP by making use of Pub/Sub
The component uses either the 'push' or 'pull' strategy to retrieve log entries and forward them to the list of receivers in `forward_to`.
-Multiple `loki.source.gcplog` components can be specified by giving them
-different labels.
+Multiple `loki.source.gcplog` components can be specified by giving them different labels.

## Usage

@@ -99,30 +93,28 @@ push requests from GCP's Pub/Sub servers.
The following arguments can be used to configure the `push` block. Any omitted fields take their default values.

-| Name | Type | Description | Default | Required |
-|-----------------------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|---------|----------|
-| `graceful_shutdown_timeout` | `duration` | Timeout for servers graceful shutdown. If configured, should be greater than zero. | "30s" | no |
-| `push_timeout` | `duration` | Sets a maximum processing time for each incoming GCP log entry. | `"0s"` | no |
-| `labels` | `map(string)` | Additional labels to associate with incoming entries. | `"{}"` | no |
-| `use_incoming_timestamp` | `bool` | Whether to use the incoming entry timestamp. | `false` | no |
-| `use_full_line` | `bool` | Send the full line from Cloud Logging even if `textPayload` is available.
By default, if `textPayload` is present in the line, then it's used as log line | `false` | no |
+| Name | Type | Description | Default | Required |
+|-----------------------------|---------------|------------------------------------------------------------------------------------|---------|----------|
+| `graceful_shutdown_timeout` | `duration` | Timeout for the server's graceful shutdown. If configured, should be greater than zero. | `"30s"` | no |
+| `push_timeout` | `duration` | Sets a maximum processing time for each incoming GCP log entry. | `"0s"` | no |
+| `labels` | `map(string)` | Additional labels to associate with incoming entries. | `"{}"` | no |
+| `use_incoming_timestamp` | `bool` | Whether to use the incoming entry timestamp. | `false` | no |
+| `use_full_line` | `bool` | Send the full line from Cloud Logging even if `textPayload` is available. By default, if `textPayload` is present in the line, then it's used as the log line. | `false` | no |

-The server listens for POST requests from GCP's Push subscriptions on
-`HOST:PORT/gcp/api/v1/push`.
+The server listens for POST requests from GCP's Push subscriptions on `HOST:PORT/gcp/api/v1/push`.

By default, for both strategies the component assigns the log entry timestamp
-as the time it was processed, except if `use_incoming_timestamp` is set to
-true.
+as the time it was processed, unless `use_incoming_timestamp` is set to true.

The `labels` map is applied to every entry that passes through the component.

### http

-{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/loki-server-http.md" source="alloy" version="" >}}

### grpc

-{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/loki-server-grpc.md" source="alloy" version="" >}}

## Exported fields

diff --git a/docs/sources/flow/reference/components/loki.source.gelf.md b/docs/sources/reference/components/loki.source.gelf.md
similarity index 69%
rename from docs/sources/flow/reference/components/loki.source.gelf.md
rename to docs/sources/reference/components/loki.source.gelf.md
index eec3ef5c9a..0d3c508c51 100644
--- a/docs/sources/flow/reference/components/loki.source.gelf.md
+++ b/docs/sources/reference/components/loki.source.gelf.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.gelf/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.gelf/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.gelf/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.gelf/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.gelf/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.gelf/
description: Learn about loki.source.gelf
title: loki.source.gelf
---
@@ -31,11 +26,11 @@ log entries to the list of receivers passed in `forward_to`.

`loki.source.gelf` supports the following arguments:

-Name | Type | Description | Default | Required
------------- |----------------------|--------------------------------------------------------------------------------|----------------------------| --------
-`listen_address` | `string` | UDP address and port to listen for Graylog messages.
| `0.0.0.0:12201` | no
-`use_incoming_timestamp` | `bool` | When false, assigns the current timestamp to the log when it was processed | `false` | no
-`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | "{}" | no
+Name | Type | Description | Default | Required
+-------------------------|----------------|------------------------------------------------------------------------------|-----------------|---------
+`listen_address` | `string` | UDP address and port to listen for Graylog messages. | `0.0.0.0:12201` | no
+`use_incoming_timestamp` | `bool` | When false, assigns the current timestamp to the log when it was processed. | `false` | no
+`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `"{}"` | no

> **NOTE**: GELF logs can be sent uncompressed or compressed with GZIP or ZLIB.

@@ -56,7 +51,7 @@ All labels starting with `__` are removed prior to forwarding log entries.
To keep these labels, relabel them using a [loki.relabel][] component and pass its `rules` export to the `relabel_rules` argument.

-[loki.relabel]: {{< relref "./loki.relabel.md" >}}
+[loki.relabel]: ../loki.relabel/

## Component health

diff --git a/docs/sources/flow/reference/components/loki.source.heroku.md b/docs/sources/reference/components/loki.source.heroku.md
similarity index 84%
rename from docs/sources/flow/reference/components/loki.source.heroku.md
rename to docs/sources/reference/components/loki.source.heroku.md
index 62aaff4db7..888f2ab99e 100644
--- a/docs/sources/flow/reference/components/loki.source.heroku.md
+++ b/docs/sources/reference/components/loki.source.heroku.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.heroku/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.heroku/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.heroku/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.heroku/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.heroku/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.heroku/
description: Learn about loki.source.heroku
title: loki.source.heroku
---
@@ -17,7 +12,8 @@ and forwards them to other `loki.*` components.
The component starts a new heroku listener for the given `listener` block and fans out incoming entries to the list of receivers in `forward_to`.

-Before using `loki.source.heroku`, Heroku should be configured with the URL where the Agent will be listening. Follow the steps in [Heroku HTTPS Drain docs](https://devcenter.heroku.com/articles/log-drains#https-drains) for using the Heroku CLI with a command like the following:
+Before using `loki.source.heroku`, Heroku should be configured with the URL where {{< param "PRODUCT_NAME" >}} will be listening.
+Follow the steps in [Heroku HTTPS Drain docs](https://devcenter.heroku.com/articles/log-drains#https-drains) for using the Heroku CLI with a command like the following:

```shell
heroku drains:add [http|https]://HOSTNAME:PORT/heroku/api/v1/drain -a HEROKU_APP_NAME
```
@@ -68,11 +64,11 @@ Hierarchy | Name | Description | Requ

### http

-{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/loki-server-http.md" source="alloy" version="" >}}

### grpc

-{{< docs/shared lookup="flow/reference/components/loki-server-grpc.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/loki-server-grpc.md" source="alloy" version="" >}}

## Labels

diff --git a/docs/sources/flow/reference/components/loki.source.journal.md b/docs/sources/reference/components/loki.source.journal.md
similarity index 59%
rename from docs/sources/flow/reference/components/loki.source.journal.md
rename to docs/sources/reference/components/loki.source.journal.md
index de776c97b7..3b8b1905c3 100644
--- a/docs/sources/flow/reference/components/loki.source.journal.md
+++ b/docs/sources/reference/components/loki.source.journal.md
@@ -1,21 +1,14 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.journal/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.journal/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.journal/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.journal/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.journal/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.journal/
description: Learn about loki.source.journal
title: loki.source.journal
---

# loki.source.journal

-`loki.source.journal` reads from the systemd journal and forwards them to other
-`loki.*` components.
+`loki.source.journal` reads log entries from the systemd journal and forwards them to other `loki.*` components.

-Multiple `loki.source.journal` components can be specified by giving them
-different labels.
+Multiple `loki.source.journal` components can be specified by giving them different labels.

## Usage

@@ -31,15 +24,15 @@ log entries to the list of receivers passed in `forward_to`.

`loki.source.journal` supports the following arguments:

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`format_as_json` | `bool` | Whether to forward the original journal entry as JSON. | `false` | no
-`max_age` | `duration` | The oldest relative time from process start that will be read. | `"7h"` | no
-`path` | `string` | Path to a directory to read entries from. | `""` | no
-`matches` | `string` | Journal matches to filter. The `+` character is not supported, only logical AND matches will be added. | `""` | no
-`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
-`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no
-`labels` | `map(string)` | The labels to apply to every log coming out of the journal. | `{}` | no
+Name | Type | Description | Default | Required
+-----------------|----------------------|--------------------------------------------------------------------------------------------------------|---------|---------
+`format_as_json` | `bool` | Whether to forward the original journal entry as JSON.
| `false` | no
+`max_age` | `duration` | The oldest relative time from process start that will be read. | `"7h"` | no
+`path` | `string` | Path to a directory to read entries from. | `""` | no
+`matches` | `string` | Journal matches to filter. The `+` character is not supported; only logical AND matches will be added. | `""` | no
+`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
+`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no
+`labels` | `map(string)` | The labels to apply to every log coming out of the journal. | `{}` | no

> **NOTE**: A `job` label is added with the full name of the component `loki.source.journal.LABEL`.

@@ -60,17 +53,16 @@ pattern of `__journal_FIELDNAME` and will be dropped before sending to the list
of receivers specified in `forward_to`. To keep these labels, use the
`relabel_rules` argument and relabel them to not be prefixed with `__`.

-> **NOTE**: many field names from journald start with an `_`, such as
-> `_systemd_unit`. The final internal label name would be
-> `__journal__systemd_unit`, with _two_ underscores between `__journal` and
-> `systemd_unit`.
+{{< admonition type="note" >}}
+Many field names from journald start with an `_`, such as `_systemd_unit`.
+The final internal label name would be `__journal__systemd_unit`, with _two_ underscores between `__journal` and `systemd_unit`.
+{{< /admonition >}}

-[loki.relabel]: {{< relref "./loki.relabel.md" >}}
+[loki.relabel]: ../loki.relabel/

## Component health

-`loki.source.journal` is only reported as unhealthy if given an invalid
-configuration.
+`loki.source.journal` is only reported as unhealthy if given an invalid configuration.

## Debug Metrics

diff --git a/docs/sources/flow/reference/components/loki.source.kafka.md b/docs/sources/reference/components/loki.source.kafka.md
similarity index 92%
rename from docs/sources/flow/reference/components/loki.source.kafka.md
rename to docs/sources/reference/components/loki.source.kafka.md
index e7aaa2e599..bb85c6d981 100644
--- a/docs/sources/flow/reference/components/loki.source.kafka.md
+++ b/docs/sources/reference/components/loki.source.kafka.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.kafka/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.kafka/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.kafka/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.kafka/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kafka/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.kafka/
description: Learn about loki.source.kafka
title: loki.source.kafka
---
@@ -22,8 +17,7 @@ writing events to at least one topic. Follow the steps in the [Kafka Quick
Start](https://kafka.apache.org/documentation/#quickstart) to get started with Kafka.

-Multiple `loki.source.kafka` components can be specified by giving them
-different labels.
+Multiple `loki.source.kafka` components can be specified by giving them different labels.

## Usage

@@ -72,7 +66,7 @@ All labels starting with `__` are removed prior to forwarding log entries.
To keep these labels, relabel them using a [loki.relabel][] component and pass its `rules` export to the `relabel_rules` argument.
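+As a sketch of that pattern (the broker address and topic name are hypothetical, and a `loki.write.local` component is assumed to be defined elsewhere), the following keeps the Kafka topic as a `topic` label:
+
+```river
+// The rules are only exported here, so loki.relabel needs no receivers of its own.
+loki.relabel "kafka" {
+  forward_to = []
+
+  rule {
+    source_labels = ["__meta_kafka_topic"]
+    target_label  = "topic"
+  }
+}
+
+loki.source.kafka "example" {
+  brokers       = ["kafka-broker-1.example.com:9092"]
+  topics        = ["app-logs"]
+  forward_to    = [loki.write.local.receiver]
+  relabel_rules = loki.relabel.kafka.rules
+}
+```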
-[loki.relabel]: {{< relref "./loki.relabel.md" >}}
+[loki.relabel]: ../loki.relabel/

## Blocks

@@ -87,11 +81,8 @@ The following blocks are supported inside the definition of `loki.source.kafka`:
authentication > sasl_config > oauth_config | [oauth_config] | Optional authentication configuration with Kafka brokers. | no

[authentication]: #authentication-block
-
[tls_config]: #tls_config-block
-
[sasl_config]: #sasl_config-block
-
[oauth_config]: #oauth_config-block

### authentication block

@@ -107,7 +98,7 @@ you must set the `tls_config` block. If `"sasl"` is used, you must set the `sasl

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

### sasl_config block

@@ -136,8 +127,7 @@ The `oauth_config` is required when the SASL mechanism is set to `OAUTHBEARER`.

## Component health

-`loki.source.kafka` is only reported as unhealthy if given an invalid
-configuration.
+`loki.source.kafka` is only reported as unhealthy if given an invalid configuration.

## Debug information

diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes.md b/docs/sources/reference/components/loki.source.kubernetes.md
similarity index 56%
rename from docs/sources/flow/reference/components/loki.source.kubernetes.md
rename to docs/sources/reference/components/loki.source.kubernetes.md
index 66194a3db4..1729137117 100644
--- a/docs/sources/flow/reference/components/loki.source.kubernetes.md
+++ b/docs/sources/reference/components/loki.source.kubernetes.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.kubernetes/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.kubernetes/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.kubernetes/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.kubernetes/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kubernetes/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.kubernetes/
description: Learn about loki.source.kubernetes
labels:
stage: experimental
title: loki.source.kubernetes
@@ -13,7 +8,7 @@ title: loki.source.kubernetes

# loki.source.kubernetes

-{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}}

`loki.source.kubernetes` tails logs from Kubernetes containers using the
Kubernetes API. It has the following benefits over `loki.source.file`:
@@ -24,12 +19,11 @@ Kubernetes API. It has the following benefits over `loki.source.file`:
* It doesn't require a DaemonSet to collect logs, so one {{< param "PRODUCT_ROOT_NAME" >}} could collect logs for the whole cluster.

-> **NOTE**: Because `loki.source.kubernetes` uses the Kubernetes API to tail
-> logs, it uses more network traffic and CPU consumption of Kubelets than
-> `loki.source.file`.
+{{< admonition type="note" >}}
+Because `loki.source.kubernetes` uses the Kubernetes API to tail logs, it uses more network traffic and Kubelet CPU than `loki.source.file`.
+{{< /admonition >}}

-Multiple `loki.source.kubernetes` components can be specified by giving them
-different labels.
+Multiple `loki.source.kubernetes` components can be specified by giving them different labels.
## Usage

@@ -47,21 +41,17 @@ log entries to the list of receivers passed in `forward_to`.

`loki.source.kubernetes` supports the following arguments:

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`targets` | `list(map(string))` | List of files to read from. | | yes
-`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
+Name | Type | Description | Default | Required
+-------------|----------------------|-------------------------------------------|---------|---------
+`targets` | `list(map(string))` | List of targets to tail logs from. | | yes
+`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes

Each target in `targets` must have the following labels:

-* `__meta_kubernetes_namespace` or `__pod_namespace__` to specify the namespace
- of the pod to tail.
-* `__meta_kubernetes_pod_name` or `__pod_name__` to specify the name of the pod
- to tail.
-* `__meta_kubernetes_pod_container_name` or `__pod_container_name__` to specify
- the container within the pod to tail.
-* `__meta_kubernetes_pod_uid` or `__pod_uid__` to specify the UID of the pod to
- tail.
+* `__meta_kubernetes_namespace` or `__pod_namespace__` to specify the namespace of the pod to tail.
+* `__meta_kubernetes_pod_name` or `__pod_name__` to specify the name of the pod to tail.
+* `__meta_kubernetes_pod_container_name` or `__pod_container_name__` to specify the container within the pod to tail.
+* `__meta_kubernetes_pod_uid` or `__pod_uid__` to specify the UID of the pod to tail.

By default, all of these labels are present when the output
`discovery.kubernetes` is used.

@@ -75,15 +65,15 @@ before the container has permanently terminated.

The following blocks are supported inside the definition of
`loki.source.kubernetes`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-client | [client][] | Configures Kubernetes client used to tail logs. | no
-client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
-client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no
-client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
-client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
-clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_NAME" >}} is running in clustered mode. | no
+Hierarchy | Block | Description | Required
+-----------------------------|-------------------|---------------------------------------------------------------------------------------------|---------
+client | [client][] | Configures Kubernetes client used to tail logs. | no
+client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no
+client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no
+client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_NAME" >}} is running in clustered mode.
| no The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to a `basic_auth` block defined @@ -105,18 +95,18 @@ used. The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`api_server` | `string` | URL of the Kubernetes API server. | | no -`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`api_server` | `string` | URL of the Kubernetes API server. | | no +`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument][client]. @@ -125,29 +115,29 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
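+For instance, a minimal sketch of authenticating to an API server outside the cluster (the server URL and token path are hypothetical, and `discovery.kubernetes.pods` and `loki.write.local` components are assumed to be defined elsewhere):
+
+```river
+// Tail pod logs through an external Kubernetes API server using a bearer token.
+loki.source.kubernetes "pods" {
+  targets    = discovery.kubernetes.pods.targets
+  forward_to = [loki.write.local.receiver]
+
+  client {
+    api_server        = "https://kubernetes.example.com:6443"
+    bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+  }
+}
+```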
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### clustering (beta) -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`enabled` | `bool` | Distribute log collection with other cluster nodes. | | yes +Name | Type | Description | Default | Required +----------|--------|-----------------------------------------------------|---------|--------- +`enabled` | `bool` | Distribute log collection with other cluster nodes. | | yes When {{< param "PRODUCT_ROOT_NAME" >}} is [using clustering][], and `enabled` is set to true, then this `loki.source.kubernetes` component instance opts-in to participating in the @@ -157,7 +147,7 @@ If {{< param "PRODUCT_ROOT_NAME" >}} is _not_ running in clustered mode, then th `loki.source.kubernetes` collects logs from every target it receives in its arguments. -[using clustering]: {{< relref "../../concepts/clustering.md" >}} +[using clustering]: ../../../concepts/clustering/ ## Exported fields @@ -175,8 +165,7 @@ target: * The labels associated with the target. * The full set of labels which were found during service discovery. -* The most recent time a log line was read and forwarded to the next components - in the pipeline. +* The most recent time a log line was read and forwarded to the next components in the pipeline. * The most recent error from tailing, if any. 
## Debug metrics diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes_events.md b/docs/sources/reference/components/loki.source.kubernetes_events.md similarity index 54% rename from docs/sources/flow/reference/components/loki.source.kubernetes_events.md rename to docs/sources/reference/components/loki.source.kubernetes_events.md index 85a1d59637..faae42fcc3 100644 --- a/docs/sources/flow/reference/components/loki.source.kubernetes_events.md +++ b/docs/sources/reference/components/loki.source.kubernetes_events.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.kubernetes_events/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.kubernetes_events/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.kubernetes_events/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.kubernetes_events/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.kubernetes_events/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.kubernetes_events/ description: Learn about loki.source.kubernetes_events title: loki.source.kubernetes_events --- @@ -14,8 +9,7 @@ title: loki.source.kubernetes_events `loki.source.kubernetes_events` tails events from the Kubernetes API and converts them into log lines to forward to other `loki` components. -Multiple `loki.source.kubernetes_events` components can be specified by giving them -different labels. +Multiple `loki.source.kubernetes_events` components can be specified by giving them different labels. ## Usage @@ -32,28 +26,27 @@ log entries to the list of receivers passed in `forward_to`. `loki.source.kubernetes_events` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`job_name` | `string` | Value to use for `job` label for generated logs. | `"loki.source.kubernetes_events"` | no -`log_format` | `string` | Format of the log. | `"logfmt"` | no -`namespaces` | `list(string)` | Namespaces to watch for Events in. | `[]` | no -`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes +Name | Type | Description | Default | Required +-------------|----------------------|--------------------------------------------------|-----------------------------------|--------- +`job_name` | `string` | Value to use for `job` label for generated logs. | `"loki.source.kubernetes_events"` | no +`log_format` | `string` | Format of the log. | `"logfmt"` | no +`namespaces` | `list(string)` | Namespaces to watch for Events in. | `[]` | no +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes By default, `loki.source.kubernetes_events` will watch for events in all namespaces. A list of explicit namespaces to watch can be provided in the `namespaces` argument. -By default, the generated log lines will be in the `logfmt` format. Use the -`log_format` argument to change it to `json`. These formats are also names of -LogQL parsers, which can be used for processing the logs. +By default, the generated log lines will be in the `logfmt` format. +Use the `log_format` argument to change it to `json`. +These formats are also names of LogQL parsers, which can be used for processing the logs. 
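+For example, a minimal sketch that emits events as JSON (the `loki.write.local` receiver is assumed to be defined elsewhere):
+
+```river
+// Generated log lines are JSON objects, which can later be parsed with
+// LogQL's `json` parser.
+loki.source.kubernetes_events "json_format" {
+  log_format = "json"
+  forward_to = [loki.write.local.receiver]
+}
+```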
-> **NOTE**: When watching all namespaces, {{< param "PRODUCT_NAME" >}} must have permissions -> to watch events at the cluster scope (such as using a ClusterRoleBinding). If -> an explicit list of namespaces is provided, {{< param "PRODUCT_NAME" >}} only needs -> permissions to watch events for those namespaces. +{{< admonition type="note" >}} +When watching all namespaces, {{< param "PRODUCT_NAME" >}} must have permissions to watch events at the cluster scope (such as using a ClusterRoleBinding). +If an explicit list of namespaces is provided, {{< param "PRODUCT_NAME" >}} only needs permissions to watch events for those namespaces. +{{< /admonition >}} -Log lines generated by `loki.source.kubernetes_events` have the following -labels: +Log lines generated by `loki.source.kubernetes_events` have the following labels: * `namespace`: Namespace of the Kubernetes object involved in the event. * `job`: Value specified by the `job_name` argument. @@ -66,21 +59,21 @@ remove the job label, forward the output of `loki.source.kubernetes_events` to For compatibility with the `eventhandler` integration from static mode, `job_name` can be set to `"integrations/kubernetes/eventhandler"`. -[loki.relabel]: {{< relref "./loki.relabel.md" >}} +[loki.relabel]: ../loki.relabel/ ## Blocks The following blocks are supported inside the definition of `loki.source.kubernetes_events`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | Configures Kubernetes client used to tail logs. | no -client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no -client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +-----------------------------|-------------------|----------------------------------------------------------|--------- +client | [client][] | Configures Kubernetes client used to tail logs. | no +client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no +client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to a `basic_auth` block defined @@ -96,23 +89,22 @@ inside a `client` block. The `client` block configures the Kubernetes client used to tail logs from containers. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is -used. +configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used. 
The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`api_server` | `string` | URL of the Kubernetes API server. | | no -`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`api_server` | `string` | URL of the Kubernetes API server. | | no +`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument][client]. @@ -121,23 +113,23 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
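+As a sketch, watching events in a remote cluster through a kubeconfig file rather than the in-cluster configuration (the file path is hypothetical, and `loki.write.local` is assumed to exist):
+
+```river
+// Watch events using credentials from a kubeconfig file.
+loki.source.kubernetes_events "remote_cluster" {
+  forward_to = [loki.write.local.receiver]
+
+  client {
+    kubeconfig_file = "/etc/alloy/kubeconfig.yaml"
+  }
+}
+```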
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

## Exported fields

@@ -145,13 +137,11 @@ Name | Type | Description

## Component health

-`loki.source.kubernetes_events` is only reported as unhealthy if given an invalid
-configuration.
+`loki.source.kubernetes_events` is only reported as unhealthy if given an invalid configuration.

## Debug information

-`loki.source.kubernetes_events` exposes the most recently read timestamp for
-events in each watched namespace.
+`loki.source.kubernetes_events` exposes the most recently read timestamp for events in each watched namespace.

## Debug metrics

@@ -169,13 +159,12 @@ The data path is inside the directory configured by the `--storage.path` [comman
In the Static mode's [eventhandler integration][eventhandler-integration], a `cache_path` argument is used to configure a positions file. In Flow mode, this argument is no longer necessary.

-[cmd-args]: {{< relref "../cli/run.md" >}}
-[eventhandler-integration]: {{< relref "../../../static/configuration/integrations/integrations-next/eventhandler-config.md" >}}
+[cmd-args]: ../../cli/run/
+[eventhandler-integration]: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/eventhandler-config/

## Example

-This example collects watches events in the `kube-system` namespace and
-forwards them to a `loki.write` component so they are written to Loki.
+This example watches events in the `kube-system` namespace and forwards them to a `loki.write` component so they are written to Loki.
```river
loki.source.kubernetes_events "example" {

diff --git a/docs/sources/flow/reference/components/loki.source.podlogs.md b/docs/sources/reference/components/loki.source.podlogs.md
similarity index 53%
rename from docs/sources/flow/reference/components/loki.source.podlogs.md
rename to docs/sources/reference/components/loki.source.podlogs.md
index 7c204593b2..884e31beb2 100644
--- a/docs/sources/flow/reference/components/loki.source.podlogs.md
+++ b/docs/sources/reference/components/loki.source.podlogs.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.podlogs/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.podlogs/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.podlogs/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.podlogs/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.podlogs/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.podlogs/
description: Learn about loki.source.podlogs
labels:
stage: experimental
title: loki.source.podlogs
@@ -13,7 +8,7 @@ title: loki.source.podlogs

# loki.source.podlogs

-{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}}

`loki.source.podlogs` discovers `PodLogs` resources on Kubernetes and, using
the Kubernetes API, tails logs from Kubernetes containers of Pods specified by
@@ -22,14 +17,14 @@ the discovered them.

`loki.source.podlogs` is similar to `loki.source.kubernetes`, but uses custom
resources rather than being fed targets from another Flow component.

-> **NOTE**: Unlike `loki.source.kubernetes`, it is not possible to distribute
-> responsibility of collecting logs across multiple {{< param "PRODUCT_ROOT_NAME" >}}s. To avoid collecting
-> duplicate logs, only one {{< param "PRODUCT_ROOT_NAME" >}} should be running a `loki.source.podlogs`
-> component.
+{{< admonition type="note" >}}
+Unlike `loki.source.kubernetes`, it is not possible to distribute the responsibility of collecting logs across multiple {{< param "PRODUCT_ROOT_NAME" >}}s.
+To avoid collecting duplicate logs, only one {{< param "PRODUCT_ROOT_NAME" >}} should be running a `loki.source.podlogs` component.
+{{< /admonition >}}

-> **NOTE**: Because `loki.source.podlogs` uses the Kubernetes API to tail logs,
-> it uses more network traffic and CPU consumption of Kubelets than
-> `loki.source.file`.
+{{< admonition type="note" >}}
+Because `loki.source.podlogs` uses the Kubernetes API to tail logs, it uses more network traffic and Kubelet CPU than `loki.source.file`.
+{{< /admonition >}}

Multiple `loki.source.podlogs` components can be specified by giving them
different labels.

@@ -49,9 +44,9 @@ log entries to the list of receivers passed in `forward_to`.

`loki.source.podlogs` supports the following arguments:

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
+Name | Type | Description | Default | Required
+-------------|----------------------|-------------------------------------------|---------|---------
+`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes

`loki.source.podlogs` searches for `PodLogs` resources on Kubernetes.
Each `PodLogs` resource describes a set of pods to tail logs from.

@@ -64,12 +59,12 @@ The `PodLogs` resource describes a set of Pods to collect logs from.
> `monitoring.grafana.com/v1alpha2`, and is not compatible with `PodLogs` from
> the {{< param "PRODUCT_ROOT_NAME" >}} Operator, which are version `v1alpha1`.

-Field | Type | Description
----- | ---- | -----------
-`apiVersion` | string | `monitoring.grafana.com/v1alpha2`
-`kind` | string | `PodLogs`
-`metadata` | [ObjectMeta][] | Metadata for the PodLogs.
-`spec` | [PodLogsSpec][] | Definition of what Pods to collect logs from.
+Field | Type | Description
+-------------|-----------------|----------------------------------------------
+`apiVersion` | string | `monitoring.grafana.com/v1alpha2`
+`kind` | string | `PodLogs`
+`metadata` | [ObjectMeta][] | Metadata for the PodLogs.
+`spec` | [PodLogsSpec][] | Definition of what Pods to collect logs from.

[ObjectMeta]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta
[PodLogsSpec]: #podlogsspec
@@ -78,39 +73,31 @@ Field | Type | Description

`PodLogsSpec` describes a set of Pods to collect logs from.

-Field | Type | Description
----- | ---- | -----------
-`selector` | [LabelSelector][] | Label selector of Pods to collect logs from.
+Field | Type | Description
+--------------------|-------------------|-------------------------------------------------------------
+`selector` | [LabelSelector][] | Label selector of Pods to collect logs from.
`namespaceSelector` | [LabelSelector][] | Label selector of Namespaces that Pods can be discovered in.
-`relabelings` | [RelabelConfig][] | Relabel rules to apply to discovered Pods.
+`relabelings` | [RelabelConfig][] | Relabel rules to apply to discovered Pods.

-If `selector` is left as the default value, all Pods are discovered. If
-`namespaceSelector` is left as the default value, all Namespaces are used for
-Pod discovery.
+If `selector` is left as the default value, all Pods are discovered.
+If `namespaceSelector` is left as the default value, all Namespaces are used for Pod discovery.

-The `relabelings` field can be used to modify labels from discovered Pods. The
-following meta labels are available for relabeling:
+The `relabelings` field can be used to modify labels from discovered Pods.
+The following meta labels are available for relabeling:

* `__meta_kubernetes_namespace`: The namespace of the Pod.
* `__meta_kubernetes_pod_name`: The name of the Pod.
* `__meta_kubernetes_pod_ip`: The pod IP of the Pod.
* `__meta_kubernetes_pod_label_<labelname>`: Each label from the Pod.
-* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from
- the Pod.
-* `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the
- Pod.
-* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each
- annotation from the Pod.
-* `__meta_kubernetes_pod_container_init`: `true` if the container is an
- `InitContainer`.
+* `__meta_kubernetes_pod_labelpresent_<labelname>`: `true` for each label from the Pod.
+* `__meta_kubernetes_pod_annotation_<annotationname>`: Each annotation from the Pod.
+* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each annotation from the Pod.
+* `__meta_kubernetes_pod_container_init`: `true` if the container is an `InitContainer`.
* `__meta_kubernetes_pod_container_name`: Name of the container.
* `__meta_kubernetes_pod_container_image`: The image the container is using.
-* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the Pod's ready
- state.
-* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or - `Unknown` in the lifecycle. -* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled - onto. +* `__meta_kubernetes_pod_ready`: Set to `true` or `false` for the Pod's ready state. +* `__meta_kubernetes_pod_phase`: Set to `Pending`, `Running`, `Succeeded`, `Failed` or `Unknown` in the lifecycle. +* `__meta_kubernetes_pod_node_name`: The name of the node the pod is scheduled onto. * `__meta_kubernetes_pod_host_ip`: The current host IP of the pod object. * `__meta_kubernetes_pod_uid`: The UID of the Pod. * `__meta_kubernetes_pod_controller_kind`: Object kind of the Pod's controller. @@ -132,23 +119,22 @@ In addition to the meta labels, the following labels are exposed to tell The following blocks are supported inside the definition of `loki.source.podlogs`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | Configures Kubernetes client used to tail logs. | no -client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no -client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -selector | [selector][] | Label selector for which `PodLogs` to discover. | no -selector > match_expression | [match_expression][] | Label selector expression for which `PodLogs` to discover. | no -namespace_selector | [selector][] | Label selector for which namespaces to discover `PodLogs` in. | no -namespace_selector > match_expression | [match_expression][] | Label selector expression for which namespaces to discover `PodLogs` in. | no -clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_ROOT_NAME" >}} is running in clustered mode. | no - -The `>` symbol indicates deeper levels of nesting. For example, `client > -basic_auth` refers to a `basic_auth` block defined -inside a `client` block. +Hierarchy | Block | Description | Required +--------------------------------------|----------------------|--------------------------------------------------------------------------------------------------|--------- +client | [client][] | Configures Kubernetes client used to tail logs. | no +client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no +client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +selector | [selector][] | Label selector for which `PodLogs` to discover. | no +selector > match_expression | [match_expression][] | Label selector expression for which `PodLogs` to discover. | no +namespace_selector | [selector][] | Label selector for which namespaces to discover `PodLogs` in. | no +namespace_selector > match_expression | [match_expression][] | Label selector expression for which namespaces to discover `PodLogs` in. 
| no +clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_ROOT_NAME" >}} is running in clustered mode. | no + +The `>` symbol indicates deeper levels of nesting. +For example, `client > basic_auth` refers to a `basic_auth` block defined inside a `client` block. [client]: #client-block [basic_auth]: #basic_auth-block @@ -163,23 +149,22 @@ inside a `client` block. The `client` block configures the Kubernetes client used to tail logs from containers. If the `client` block isn't provided, the default in-cluster -configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is -used. +configuration with the service account of the running {{< param "PRODUCT_ROOT_NAME" >}} pod is used. The following arguments are supported: -Name | Type | Description | Default | Required ------------------------- | ------------------- | ------------------------------------------------------------- | ------- | -------- -`api_server` | `string` | URL of the Kubernetes API server. | | no -`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------|--------- +`api_server` | `string` | URL of the Kubernetes API server. | | no +`kubeconfig_file` | `string` | Path of the `kubeconfig` file to use for connecting to Kubernetes. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument][client]. @@ -188,49 +173,47 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
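+
+The following sketch is illustrative only: the token and CA file paths are common in-cluster service account defaults rather than component defaults, and `loki.write.local` is assumed to be defined elsewhere in the configuration. It shows a `client` block that authenticates to the Kubernetes API server:
+
+```river
+loki.source.podlogs "example" {
+  forward_to = [loki.write.local.receiver]
+
+  client {
+    api_server        = "https://kubernetes.default.svc:443"
+    bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+
+    tls_config {
+      ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+    }
+  }
+}
+```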
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### selector block -The `selector` block describes a Kubernetes label selector for `PodLogs` or -Namespace discovery. +The `selector` block describes a Kubernetes label selector for `PodLogs` or Namespace discovery. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`match_labels` | `map(string)` | Label keys and values used to discover resources. | `{}` | no +Name | Type | Description | Default | Required +---------------|---------------|---------------------------------------------------|---------|--------- +`match_labels` | `map(string)` | Label keys and values used to discover resources. | `{}` | no When the `match_labels` argument is empty, all resources will be matched. ### match_expression block -The `match_expression` block describes a Kubernetes label match expression for -`PodLogs` or Namespace discovery. +The `match_expression` block describes a Kubernetes label match expression for `PodLogs` or Namespace discovery. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`key` | `string` | The label name to match against. | | yes -`operator` | `string` | The operator to use when matching. | | yes -`values`| `list(string)` | The values used when matching. | | no +Name | Type | Description | Default | Required +-----------|----------------|------------------------------------|---------|--------- +`key` | `string` | The label name to match against. | | yes +`operator` | `string` | The operator to use when matching. | | yes +`values` | `list(string)` | The values used when matching. | | no The `operator` argument must be one of the following strings: @@ -244,9 +227,9 @@ Both `selector` and `namespace_selector` can make use of multiple ### clustering (beta) -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`enabled` | `bool` | Distribute log collection with other cluster nodes. | | yes +Name | Type | Description | Default | Required +----------|--------|-----------------------------------------------------|---------|--------- +`enabled` | `bool` | Distribute log collection with other cluster nodes. 
| | yes When {{< param "PRODUCT_NAME" >}} is [using clustering][], and `enabled` is set to true, then this `loki.source.podlogs` component instance opts-in to participating in the @@ -255,7 +238,7 @@ cluster to distribute the load of log collection between all cluster nodes. If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op and `loki.source.podlogs` collects logs based on every PodLogs resource discovered. -[using clustering]: {{< relref "../../concepts/clustering.md" >}} +[using clustering]: ../../../concepts/clustering/ ## Exported fields diff --git a/docs/sources/flow/reference/components/loki.source.syslog.md b/docs/sources/reference/components/loki.source.syslog.md similarity index 68% rename from docs/sources/flow/reference/components/loki.source.syslog.md rename to docs/sources/reference/components/loki.source.syslog.md index b1b08bd675..19fed5694d 100644 --- a/docs/sources/flow/reference/components/loki.source.syslog.md +++ b/docs/sources/reference/components/loki.source.syslog.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.source.syslog/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.syslog/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.syslog/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.syslog/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.syslog/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.syslog/ description: Learn about loki.source.syslog title: loki.source.syslog --- @@ -18,8 +13,7 @@ with the [RFC5424](https://www.rfc-editor.org/rfc/rfc5424) format. The component starts a new syslog listener for each of the given `config` blocks and fans out incoming entries to the list of receivers in `forward_to`. -Multiple `loki.source.syslog` components can be specified by giving them -different labels. +Multiple `loki.source.syslog` components can be specified by giving them different labels. ## Usage @@ -38,25 +32,25 @@ loki.source.syslog "LABEL" { `loki.source.syslog` supports the following arguments: -Name | Type | Description | Default | Required ---------------- | ---------------------- | -------------------- | ------- | -------- -`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes -`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | "{}" | no +Name | Type | Description | Default | Required +----------------|----------------------|-------------------------------------------|---------|--------- +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes +`relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | "{}" | no The `relabel_rules` field can make use of the `rules` export value from a [loki.relabel][] component to apply one or more relabeling rules to log entries before they're forwarded to the list of receivers in `forward_to`. -[loki.relabel]: {{< relref "./loki.relabel.md" >}} +[loki.relabel]: ../loki.relabel/ ## Blocks The following blocks are supported inside the definition of `loki.source.syslog`: -Hierarchy | Name | Description | Required ---------- | ---- | ----------- | -------- -listener | [listener][] | Configures a listener for IETF Syslog (RFC5424) messages. 
| no
+Hierarchy | Name | Description | Required
+----------------------|----------------|------------------------------------------------------------------------------|---------
listener | [listener][] | Configures a listener for IETF Syslog (RFC5424) messages. | no
listener > tls_config | [tls_config][] | Configures TLS settings for connecting to the endpoint for TCP connections. | no
The `>` symbol indicates deeper levels of nesting. For example, `listener > tls_config`
@@ -75,34 +69,30 @@ The following arguments can be used to configure a `listener`. Only the
`address` field is required and any omitted fields take their default
values.
-Name | Type | Description | Default | Required
------------------------- | ------------- | ----------- | ------- | --------
-`address` | `string` | The `` address to listen to for syslog messages. | | yes
-`protocol` | `string` | The protocol to listen to for syslog messages. Must be either `tcp` or `udp`. | `tcp` | no
-`idle_timeout` | `duration` | The idle timeout for tcp connections. | `"120s"` | no
-`label_structured_data` | `bool` | Whether to translate syslog structured data to loki labels. | `false` | no
-`labels` | `map(string)` | The labels to associate with each received syslog record. | `{}` | no
-`use_incoming_timestamp` | `bool` | Whether to set the timestamp to the incoming syslog record timestamp. | `false` | no
-`use_rfc5424_message` | `bool` | Whether to forward the full RFC5424-formatted syslog message. | `false` | no
-`max_message_length` | `int` | The maximum limit to the length of syslog messages. | `8192` | no
+Name | Type | Description | Default | Required
+-------------------------|---------------|---------------------------------------------------------------------------------|----------|---------
+`address` | `string` | The `<host:port>` address to listen to for syslog messages. | | yes
+`protocol` | `string` | The protocol to listen to for syslog messages. Must be either `tcp` or `udp`. | `tcp` | no
+`idle_timeout` | `duration` | The idle timeout for TCP connections. | `"120s"` | no
+`label_structured_data` | `bool` | Whether to translate syslog structured data to Loki labels. | `false` | no
+`labels` | `map(string)` | The labels to associate with each received syslog record. | `{}` | no
+`use_incoming_timestamp` | `bool` | Whether to set the timestamp to the incoming syslog record timestamp. | `false` | no
+`use_rfc5424_message` | `bool` | Whether to forward the full RFC5424-formatted syslog message. | `false` | no
+`max_message_length` | `int` | The maximum limit to the length of syslog messages. | `8192` | no
-By default, the component assigns the log entry timestamp as the time it
-was processed.
+By default, the component assigns the log entry timestamp as the time it was processed.
The `labels` map is applied to every message that the component reads.
All header fields from the parsed RFC5424 messages are brought in as internal labels, prefixed with `__syslog_`.
-If `label_structured_data` is set, structured data in the syslog header is also
-translated to internal labels in the form of
-`__syslog_message_sd__`. For example, a structured data entry of
-`[example@99999 test="yes"]` becomes the label
-`__syslog_message_sd_example_99999_test` with the value `"yes"`.
+If `label_structured_data` is set, structured data in the syslog header is also translated to internal labels in the form of `__syslog_message_sd_<ID>_<KEY>`.
+For example, a structured data entry of `[example@99999 test="yes"]` becomes the label `__syslog_message_sd_example_99999_test` with the value `"yes"`.
### tls_config block
-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}
## Exported fields
diff --git a/docs/sources/flow/reference/components/loki.source.windowsevent.md b/docs/sources/reference/components/loki.source.windowsevent.md
similarity index 78%
rename from docs/sources/flow/reference/components/loki.source.windowsevent.md
rename to docs/sources/reference/components/loki.source.windowsevent.md
index 522e9e683e..b0b7f4ee4e 100644
--- a/docs/sources/flow/reference/components/loki.source.windowsevent.md
+++ b/docs/sources/reference/components/loki.source.windowsevent.md
@@ -1,21 +1,14 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/loki.source.windowsevent/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.source.windowsevent/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.source.windowsevent/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.windowsevent/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.source.windowsevent/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.source.windowsevent/
description: Learn about loki.source.windowsevent
title: loki.source.windowsevent
---
# loki.source.windowsevent
-`loki.source.windowsevent` reads events from Windows Event Logs and forwards them to other
-`loki.*` components.
+`loki.source.windowsevent` reads events from Windows Event Logs and forwards them to other `loki.*` components.
-Multiple `loki.source.windowsevent` components can be specified by giving them
-different labels.
+Multiple `loki.source.windowsevent` components can be specified by giving them different labels.
## Usage
@@ -44,19 +37,18 @@ Name | Type | Description
`exclude_event_message` | `bool` | Exclude the human-friendly event message. | `false` | no
`use_incoming_timestamp` | `bool` | When false, assigns the current timestamp to the log when it was processed. | `false` | no
`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes
-`labels` | `map(string)` | The labels to associate with incoming logs. | | no
-
-
-> **NOTE**: `eventlog_name` is required if `xpath_query` does not specify the event log.
-> You can define `xpath_query` in [short or xml form](https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events).
-> When using the XML form you can specify `event_log` in the `xpath_query`.
-> If using short form, you must define `eventlog_name`.
+`labels` | `map(string)` | The labels to associate with incoming logs. | | no
+{{< admonition type="note" >}}
+`eventlog_name` is required if `xpath_query` does not specify the event log.
+You can define `xpath_query` in [short or XML form](https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events).
+When you use the XML form, you can specify `event_log` in the `xpath_query`.
+If you use the short form, you must define `eventlog_name`.
+{{< /admonition >}}
## Component health
-`loki.source.windowsevent` is only reported as unhealthy if given an invalid
-configuration.
+`loki.source.windowsevent` is only reported as unhealthy if given an invalid configuration.
## Example diff --git a/docs/sources/flow/reference/components/loki.write.md b/docs/sources/reference/components/loki.write.md similarity index 61% rename from docs/sources/flow/reference/components/loki.write.md rename to docs/sources/reference/components/loki.write.md index bb50817385..3ce6e0554a 100644 --- a/docs/sources/flow/reference/components/loki.write.md +++ b/docs/sources/reference/components/loki.write.md @@ -1,21 +1,14 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/loki.write/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/loki.write/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/loki.write/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.write/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/loki.write/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/loki.write/ description: Learn about loki.write title: loki.write --- # loki.write -`loki.write` receives log entries from other loki components and sends them -over the network using Loki's `logproto` format. +`loki.write` receives log entries from other loki components and sends them over the network using Loki's `logproto` format. -Multiple `loki.write` components can be specified by giving them -different labels. +Multiple `loki.write` components can be specified by giving them different labels. ## Usage @@ -31,25 +24,25 @@ loki.write "LABEL" { `loki.write` supports the following arguments: -Name | Type | Description | Default | Required ------------------ | ------------- | ------------------------------------------------ | ------- | -------- -`max_streams` | `int` | Maximum number of active streams. | 0 (no limit) | no -`external_labels` | `map(string)` | Labels to add to logs sent over the network. | | no +Name | Type | Description | Default | Required +------------------|---------------|----------------------------------------------|--------------|--------- +`max_streams` | `int` | Maximum number of active streams. | 0 (no limit) | no +`external_labels` | `map(string)` | Labels to add to logs sent over the network. | | no ## Blocks The following blocks are supported inside the definition of `loki.write`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -endpoint | [endpoint][] | Location to send logs to. | no -wal | [wal][] | Write-ahead log configuration. | no -endpoint > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -endpoint > authorization | [authorization][] | Configure generic authorization to the endpoint. | no -endpoint > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -endpoint > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -endpoint > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +-------------------------------|-------------------|----------------------------------------------------------|--------- +endpoint | [endpoint][] | Location to send logs to. | no +wal | [wal][] | Write-ahead log configuration. | no +endpoint > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +endpoint > authorization | [authorization][] | Configure generic authorization to the endpoint. 
| no
+endpoint > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no
+endpoint > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
+endpoint > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no
endpoint > queue_config | [queue_config][] | When WAL is enabled, configures the queue client. | no
The `>` symbol indicates deeper levels of nesting. For example, `endpoint >
@@ -71,27 +64,27 @@ The `endpoint` block describes a single location to send logs to. Multiple
The following arguments are supported:
-Name | Type | Description | Default | Required
------------------------- | ------------------- | ------------------------------------------------------------- | ------- | --------
-`url` | `string` | Full URL to send logs to. | | yes
-`name` | `string` | Optional name to identify this endpoint with. | | no
-`headers` | `map(string)` | Extra headers to deliver with the request. | | no
-`batch_wait` | `duration` | Maximum amount of time to wait before sending a batch. | `"1s"` | no
-`batch_size` | `string` | Maximum batch size of logs to accumulate before sending. | `"1MiB"` | no
-`remote_timeout` | `duration` | Timeout for requests made to the URL. | `"10s"` | no
-`tenant_id` | `string` | The tenant ID used by default to push logs. | | no
-`min_backoff_period` | `duration` | Initial backoff time between retries. | `"500ms"` | no
-`max_backoff_period` | `duration` | Maximum backoff time between retries. | `"5m"` | no
-`max_backoff_retries` | `int` | Maximum number of retries. | 10 | no
-`retry_on_http_429` | `bool` | Retry when an HTTP 429 status code is received. | `true` | no
-`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no
-`bearer_token` | `secret` | Bearer token to authenticate with. | | no
-`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no
-`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no
-`proxy_url` | `string` | HTTP proxy to send requests through. | | no
-`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no
-`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no
-`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no
+Name | Type | Description | Default | Required
+-------------------------|---------------------|--------------------------------------------------------------------------------------------------|-----------|---------
+`url` | `string` | Full URL to send logs to. | | yes
+`name` | `string` | Optional name to identify this endpoint with. | | no
+`headers` | `map(string)` | Extra headers to deliver with the request. | | no
+`batch_wait` | `duration` | Maximum amount of time to wait before sending a batch. | `"1s"` | no
+`batch_size` | `string` | Maximum batch size of logs to accumulate before sending. | `"1MiB"` | no
+`remote_timeout` | `duration` | Timeout for requests made to the URL. | `"10s"` | no
+`tenant_id` | `string` | The tenant ID used by default to push logs. | | no
+`min_backoff_period` | `duration` | Initial backoff time between retries. | `"500ms"` | no
+`max_backoff_period` | `duration` | Maximum backoff time between retries. | `"5m"` | no
+`max_backoff_retries` | `int` | Maximum number of retries.
| 10 | no +`retry_on_http_429` | `bool` | Retry when an HTTP 429 status code is received. | `true` | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#endpoint-block). @@ -100,7 +93,7 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} If no `tenant_id` is provided, the component assumes that the Loki instance at `endpoint` is running in single-tenant mode and no X-Scope-OrgID header is @@ -122,19 +115,19 @@ enabled, the retry mechanism will be governed by the backoff configuration speci ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### queue_config block (experimental) @@ -143,9 +136,9 @@ underlying client queues batches of logs to be sent to Loki. The following arguments are supported: -| Name | Type | Description | Default | Required | -| --------------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -------- | -| `capacity` | `string` | Controls the size of the underlying send queue buffer. This setting should be considered a worst-case scenario of memory consumption, in which all enqueued batches are full. | `10MiB` | no | +| Name | Type | Description | Default | Required | +|-----------------|------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|----------| +| `capacity` | `string` | Controls the size of the underlying send queue buffer. 
This setting should be considered a worst-case scenario of memory consumption, in which all enqueued batches are full. | `10MiB` | no |
| `drain_timeout` | `duration` | Configures the maximum time the client can take to drain the send queue upon shutdown. During that time, it will enqueue pending batches and drain the send queue, sending each one. | `"1m"` | no |
### wal block (experimental)
@@ -165,28 +158,27 @@ storage path {{< param "PRODUCT_NAME" >}} is configured to use. See the
The following arguments are supported:
-Name | Type | Description | Default | Required
---------------------- |------------|--------------------------------------------------------------------------------------------------------------------|-----------| --------
-`enabled` | `bool` | Whether to enable the WAL. | false | no
-`max_segment_age` | `duration` | Maximum time a WAL segment should be allowed to live. Segments older than this setting will be eventually deleted. | `"1h"` | no
-`min_read_frequency` | `duration` | Minimum backoff time in the backup read mechanism. | `"250ms"` | no
-`max_read_frequency` | `duration` | Maximum backoff time in the backup read mechanism. | `"1s"` | no
-`drain_timeout` | `duration` | Maximum time the WAL drain procedure can take, before being forcefully stopped. | `"30s"` | no
+Name | Type | Description | Default | Required
+---------------------|------------|--------------------------------------------------------------------------------------------------------------|-----------|---------
+`enabled` | `bool` | Whether to enable the WAL. | `false` | no
+`max_segment_age` | `duration` | Maximum time a WAL segment should be allowed to live. Segments older than this setting are eventually deleted. | `"1h"` | no
+`min_read_frequency` | `duration` | Minimum backoff time in the backup read mechanism. | `"250ms"` | no
+`max_read_frequency` | `duration` | Maximum backoff time in the backup read mechanism. | `"1s"` | no
+`drain_timeout` | `duration` | Maximum time the WAL drain procedure can take before being forcefully stopped. | `"30s"` | no
-[run]: {{< relref "../cli/run.md" >}}
+[run]: ../../cli/run/
## Exported fields
The following fields are exported and can be referenced by other components:
-Name | Type | Description
----- | ---- | -----------
+Name | Type | Description
+-----------|----------------|--------------------------------------------------------------
`receiver` | `LogsReceiver` | A value that other components can use to send log entries to.
## Component health
-`loki.write` is only reported as unhealthy if given an invalid
-configuration.
+`loki.write` is only reported as unhealthy if given an invalid configuration.
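+
+As a sketch only (the URL, tenant ID, and external label are placeholders, not defaults), a `loki.write` component that batches logs to a local Loki instance with the experimental WAL enabled might look like:
+
+```river
+loki.write "default" {
+  endpoint {
+    url       = "http://localhost:3100/loki/api/v1/push"
+    tenant_id = "example"
+  }
+
+  wal {
+    enabled         = true
+    max_segment_age = "1h"
+  }
+
+  external_labels = {"cluster" = "dev"}
+}
+```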
## Debug information diff --git a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md b/docs/sources/reference/components/mimir.rules.kubernetes.md similarity index 81% rename from docs/sources/flow/reference/components/mimir.rules.kubernetes.md rename to docs/sources/reference/components/mimir.rules.kubernetes.md index 9a8672005b..7451a5f8ae 100644 --- a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md +++ b/docs/sources/reference/components/mimir.rules.kubernetes.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/mimir.rules.kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/mimir.rules.kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/mimir.rules.kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/mimir.rules.kubernetes/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/mimir.rules.kubernetes/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/mimir.rules.kubernetes/ description: Learn about mimir.rules.kubernetes labels: stage: beta @@ -13,7 +8,7 @@ title: mimir.rules.kubernetes # mimir.rules.kubernetes -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `mimir.rules.kubernetes` discovers `PrometheusRule` Kubernetes resources and loads them into a Mimir instance. @@ -47,22 +42,22 @@ mimir.rules.kubernetes "LABEL" { `mimir.rules.kubernetes` supports the following arguments: -Name | Type | Description | Default | Required ------------------------- | ------------------- | --------------------------------------------------------------- | ------------- | -------- -`address` | `string` | URL of the Mimir ruler. | | yes -`tenant_id` | `string` | Mimir tenant ID. | | no -`use_legacy_routes` | `bool` | Whether to use [deprecated][gem-2_2] ruler API endpoints. | false | no -`prometheus_http_prefix` | `string` | Path prefix for [Mimir's Prometheus endpoint][gem-path-prefix]. | `/prometheus` | no -`sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no -`mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "agent" | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +-------------------------|---------------------|--------------------------------------------------------------------------------------------------|---------------|--------- +`address` | `string` | URL of the Mimir ruler. | | yes +`tenant_id` | `string` | Mimir tenant ID. 
| | no +`use_legacy_routes` | `bool` | Whether to use [deprecated][gem-2_2] ruler API endpoints. | false | no +`prometheus_http_prefix` | `string` | Path prefix for [Mimir's Prometheus endpoint][gem-path-prefix]. | `/prometheus` | no +`sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no +`mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "agent" | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -73,7 +68,7 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} If no `tenant_id` is provided, the component assumes that the Mimir instance at `address` is running in single-tenant mode and no `X-Scope-OrgID` header is sent. 
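+
+For illustration only (the ruler address and tenant ID below are placeholders, not defaults), a minimal configuration that syncs `PrometheusRule` resources into a multi-tenant Mimir ruler might look like:
+
+```river
+mimir.rules.kubernetes "default" {
+  address   = "http://mimir-ruler.mimir.svc.cluster.local:8080"
+  tenant_id = "team-a"
+}
+```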
@@ -157,19 +152,19 @@ The `values` argument must not be provided when `operator` is set to `"Exists"` ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/module.file.md b/docs/sources/reference/components/module.file.md similarity index 73% rename from docs/sources/flow/reference/components/module.file.md rename to docs/sources/reference/components/module.file.md index 0e4b8b19d2..78dc24e8ad 100644 --- a/docs/sources/flow/reference/components/module.file.md +++ b/docs/sources/reference/components/module.file.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/module.file/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.file/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.file/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.file/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.file/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/module.file/ description: Learn about module.file labels: stage: beta @@ -13,7 +8,7 @@ title: module.file # module.file -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `module.file` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} component which retrieves a [module][] and runs the components defined inside of it. @@ -22,9 +17,9 @@ component which retrieves a [module][] and runs the components defined inside of a [local.file][] component. This allows a single module loader to do the equivalence of using the more generic [module.string][] paired with a [local.file][] component. 
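+
+As a hypothetical sketch (the file path and component labels are made up), the following two configurations are roughly equivalent:
+
+```river
+// Using module.file directly:
+module.file "metrics" {
+  filename = "/etc/alloy/modules/metrics.river"
+}
+
+// The equivalent module.string and local.file pairing:
+local.file "metrics" {
+  filename = "/etc/alloy/modules/metrics.river"
+}
+
+module.string "metrics" {
+  content = local.file.metrics.content
+}
+```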
-[module]: {{< relref "../../concepts/modules.md" >}}
-[local.file]: {{< relref "./local.file.md" >}}
-[module.string]: {{< relref "./module.string.md" >}}
+[module]: ../../../concepts/modules/
+[local.file]: ../local.file/
+[module.string]: ../module.string/
## Usage
@@ -44,23 +39,23 @@
The following arguments are supported:
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`filename` | `string` | Path of the file on disk to watch | | yes
+Name | Type | Description | Default | Required
+-----------------|------------|-----------------------------------------------------|--------------|---------
+`filename` | `string` | Path of the file on disk to watch. | | yes
`detector` | `string` | Which file change detector to use (fsnotify, poll). | `"fsnotify"` | no
-`poll_frequency` | `duration` | How often to poll for file changes | `"1m"` | no
-`is_secret` | `bool` | Marks the file as containing a [secret][] | `false` | no
+`poll_frequency` | `duration` | How often to poll for file changes. | `"1m"` | no
+`is_secret` | `bool` | Marks the file as containing a [secret][]. | `false` | no
-[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}}
+[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets
-{{< docs/shared lookup="flow/reference/components/local-file-arguments-text.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/local-file-arguments-text.md" source="alloy" version="" >}}
## Blocks
The following blocks are supported inside the definition of `module.file`:
-Hierarchy | Block | Description | Required
----------------- | ---------- | ----------- | --------
+Hierarchy | Block | Description | Required
+----------|---------------|----------------------------------|---------
arguments | [arguments][] | Arguments to pass to the module. | no
[arguments]: #arguments-block
@@ -79,23 +74,22 @@ The attributes provided in the `arguments` block are validated based on the
* Attributes in the `argument` block of the module loader will be rejected if they are not defined in the module source.
-[argument blocks]: {{< relref "../config-blocks/argument.md" >}}
+[argument blocks]: ../../config-blocks/argument/
## Exported fields
The following fields are exported and can be referenced by other components:
-Name | Type | Description
----- | ---- | -----------
+Name | Type | Description
+----------|------------|----------------------------------
`exports` | `map(any)` | The exports of the Module loader.
`exports` exposes the `export` config block inside a module. It can be accessed
from the parent config via `module.file.LABEL.exports.EXPORT_LABEL`.
-Values in `exports` correspond to [export blocks][] defined in the module
-source.
+Values in `exports` correspond to [export blocks][] defined in the module source.
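+
+As a sketch only (`inputs`, `targets`, and the file path are hypothetical names, not defaults), a module that exports a value and a parent that consumes it might look like:
+
+```river
+// Inside /etc/alloy/modules/targets.river:
+export "targets" {
+  value = [{"__address__" = "localhost:9090"}]
+}
+
+// In the parent configuration:
+module.file "inputs" {
+  filename = "/etc/alloy/modules/targets.river"
+}
+
+// The exported value is then available as module.file.inputs.exports.targets.
+```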
-[export blocks]: {{< relref "../config-blocks/export.md" >}} +[export blocks]: ../../config-blocks/export/ ## Component health diff --git a/docs/sources/flow/reference/components/module.git.md b/docs/sources/reference/components/module.git.md similarity index 68% rename from docs/sources/flow/reference/components/module.git.md rename to docs/sources/reference/components/module.git.md index 44bdee36a0..a0a821a008 100644 --- a/docs/sources/flow/reference/components/module.git.md +++ b/docs/sources/reference/components/module.git.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/module.git/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.git/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.git/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.git/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.git/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/module.git/ description: Learn about module.git labels: stage: beta @@ -13,14 +8,14 @@ title: module.git # module.git -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `module.git` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} component which retrieves a [module][] and runs the components defined inside of it. `module.git` retrieves a module source from a file in a Git repository. -[module]: {{< relref "../../concepts/modules.md" >}} +[module]: ../../../concepts/modules/ ## Usage @@ -41,12 +36,12 @@ module.git "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`repository` | `string` | The Git repository address to retrieve the module from. | | yes -`revision` | `string` | The Git revision to retrieve the module from. | `"HEAD"` | no -`path` | `string` | The path in the repository where the module is stored. | | yes -`pull_frequency` | `duration` | The frequency to pull the repository for updates. | `"60s"` | no +Name | Type | Description | Default | Required +-----------------|------------|---------------------------------------------------------|----------|--------- +`repository` | `string` | The Git repository address to retrieve the module from. | | yes +`revision` | `string` | The Git revision to retrieve the module from. | `"HEAD"` | no +`path` | `string` | The path in the repository where the module is stored. | | yes +`pull_frequency` | `duration` | The frequency to pull the repository for updates. | `"60s"` | no The `repository` attribute must be set to a repository address that would be recognized by Git with a `git clone REPOSITORY_ADDRESS` command, such as @@ -66,11 +61,11 @@ the retrieved changes. The following blocks are supported inside the definition of `module.git`: -Hierarchy | Block | Description | Required ----------------- | ---------- | ----------- | -------- +Hierarchy | Block | Description | Required +-----------|----------------|------------------------------------------------------|--------- basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the repo. | no -ssh_key | [ssh_key][] | Configure a SSH Key for authenticating to the repo. | no -arguments | [arguments][] | Arguments to pass to the module. 
| no
+ssh_key | [ssh_key][] | Configure an SSH key for authenticating to the repo. | no
+arguments | [arguments][] | Arguments to pass to the module. | no
[basic_auth]: #basic_auth-block
[ssh_key]: #ssh_key-block
@@ -78,16 +73,16 @@
### basic_auth block
-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}
### ssh_key block
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`username` | `string` | SSH username. | | yes
-`key` | `secret` | SSH private key | | no
-`key_file` | `string` | SSH private key path. | | no
-`passphrase` | `secret` | Passphrase for SSH key if needed. | | no
+Name | Type | Description | Default | Required
+-------------|----------|-----------------------------------|---------|---------
+`username` | `string` | SSH username. | | yes
+`key` | `secret` | SSH private key. | | no
+`key_file` | `string` | SSH private key path. | | no
+`passphrase` | `secret` | Passphrase for SSH key if needed. | | no
### arguments block
@@ -103,14 +98,14 @@
The attributes provided in the `arguments` block are validated based on the
* Attributes in the `argument` block of the module loader will be rejected if they are not defined in the module source.
-[argument blocks]: {{< relref "../config-blocks/argument.md" >}}
+[argument blocks]: ../../config-blocks/argument/
## Exported fields
The following fields are exported and can be referenced by other components:
-Name | Type | Description
----- | ---- | -----------
+Name | Type | Description
+----------|------------|----------------------------------
`exports` | `map(any)` | The exports of the Module loader.
`exports` exposes the `export` config block inside a module. It can be accessed
from the parent config via `module.git.COMPONENT_LABEL.exports.EXPORT_LABEL`.
Values in `exports` correspond to [export blocks][] defined in the module
source.
-[export blocks]: {{< relref "../config-blocks/export.md" >}}
+[export blocks]: ../../config-blocks/export/
## Component health
diff --git a/docs/sources/flow/reference/components/module.http.md b/docs/sources/reference/components/module.http.md
similarity index 69%
rename from docs/sources/flow/reference/components/module.http.md
rename to docs/sources/reference/components/module.http.md
index 24e140f794..b0ccdf67b6 100644
--- a/docs/sources/flow/reference/components/module.http.md
+++ b/docs/sources/reference/components/module.http.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/module.http/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.http/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.http/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.http/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.http/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/module.http/
description: Learn about module.http
labels:
  stage: beta
@@ -13,7 +8,7 @@ title: module.http
# module.http
-{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}}
`module.http` is a [module loader][] component.
@@ -21,10 +16,10 @@ title: module.http
HTTP server.
This allows you to use a single module loader, rather than a `remote.http` component paired with a [module.string][] component.
-[module]: {{< relref "../../concepts/modules.md" >}}
-[remote.http]: {{< relref "./remote.http.md" >}}
-[module.string]: {{< relref "./module.string.md" >}}
-[module loader]: {{< relref "../../concepts/modules.md#module-loaders" >}}
+[module]: ../../../concepts/modules/
+[remote.http]: ../remote.http/
+[module.string]: ../module.string/
+[module loader]: ../../../concepts/modules/#module-loaders
## Usage
@@ -43,23 +38,23 @@
The following arguments are supported:
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`url` | `string` | URL to poll. | | yes
-`method` | `string` | Define HTTP method for the request | `"GET"` | no
-`headers` | `map(string)` | Custom headers for the request. | `{}` | no
-`poll_frequency` | `duration` | Frequency to poll the URL. | `"1m"` | no
-`poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no
-`is_secret` | `bool` | Whether the response body should be treated as a secret. | false | no
+Name | Type | Description | Default | Required
+-----------------|---------------|--------------------------------------------------------------|---------|---------
+`url` | `string` | URL to poll. | | yes
+`method` | `string` | The HTTP method to use for the request. | `"GET"` | no
+`headers` | `map(string)` | Custom headers for the request. | `{}` | no
+`poll_frequency` | `duration` | Frequency to poll the URL. | `"1m"` | no
+`poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no
+`is_secret` | `bool` | Whether the response body should be treated as a [secret][]. | `false` | no
-[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}}
+[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets
## Blocks
The following blocks are supported inside the definition of `module.http`:
-Hierarchy | Block | Description | Required
----------------- | ---------- | ----------- | --------
+Hierarchy | Block | Description | Required
+----------|---------------|----------------------------------|---------
arguments | [arguments][] | Arguments to pass to the module. | no
[arguments]: #arguments-block
@@ -78,14 +73,14 @@
The attributes provided in the `arguments` block are validated based on the
* Attributes in the `argument` block of the module loader are rejected if they are not defined in the module source.
-[argument blocks]: {{< relref "../config-blocks/argument.md" >}}
+[argument blocks]: ../../config-blocks/argument/
## Exported fields
The following fields are exported and can be referenced by other components:
-Name | Type | Description
----- | ---- | -----------
+Name | Type | Description
+----------|------------|----------------------------------
`exports` | `map(any)` | The exports of the Module loader.
`exports` exposes the `export` config block inside a module. It can be accessed
@@ -94,7 +89,7 @@ from the parent config via `module.http.LABEL.exports.EXPORT_LABEL`.
Values in `exports` correspond to [export blocks][] defined in the module source.
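+
+As an illustrative sketch (the URL and the `environment` argument are assumptions about a hypothetical module, not defaults), a `module.http` loader might look like:
+
+```river
+module.http "remote" {
+  url            = "https://example.com/modules/pipeline.river"
+  poll_frequency = "5m"
+
+  arguments {
+    environment = "dev"
+  }
+}
+```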
-[export blocks]: {{< relref "../config-blocks/export.md" >}} +[export blocks]: ../../config-blocks/export/ ## Component health diff --git a/docs/sources/flow/reference/components/module.string.md b/docs/sources/reference/components/module.string.md similarity index 76% rename from docs/sources/flow/reference/components/module.string.md rename to docs/sources/reference/components/module.string.md index ef8c5e0b88..ee4fbd2a8d 100644 --- a/docs/sources/flow/reference/components/module.string.md +++ b/docs/sources/reference/components/module.string.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/module.string/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/module.string/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/module.string/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/module.string/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/module.string/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/module.string/ description: Learn about module.string labels: stage: beta @@ -13,12 +8,12 @@ title: module.string # module.string -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `module.string` is a *module loader* component. A module loader is a {{< param "PRODUCT_NAME" >}} component which retrieves a [module][] and runs the components defined inside of it. -[module]: {{< relref "../../concepts/modules.md" >}} +[module]: ../../../concepts/modules/ ## Usage @@ -38,9 +33,9 @@ module.string "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`content` | `secret` or `string` | The contents of the module to load as a secret or string. | | yes +Name | Type | Description | Default | Required +----------|----------------------|-----------------------------------------------------------|---------|--------- +`content` | `secret` or `string` | The contents of the module to load as a secret or string. | | yes `content` is a string that contains the configuration of the module to load. `content` is typically loaded by using the exports of another component. For example, @@ -53,8 +48,8 @@ Name | Type | Description | Default | Required The following blocks are supported inside the definition of `module.string`: -Hierarchy | Block | Description | Required ----------------- | ---------- | ----------- | -------- +Hierarchy | Block | Description | Required +----------|---------------|----------------------------------|--------- arguments | [arguments][] | Arguments to pass to the module. | no [arguments]: #arguments-block @@ -73,14 +68,14 @@ The attributes provided in the `arguments` block are validated based on the * Attributes in the `argument` block of the module loader will be rejected if they are not defined in the module source. -[argument blocks]: {{< relref "../config-blocks/argument.md" >}} +[argument blocks]: ../../config-blocks/argument/ ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|------------|---------------------------------- `exports` | `map(any)` | The exports of the Module loader. `exports` exposes the `export` config block inside a module. 
It can be accessed @@ -89,7 +84,7 @@ from the parent config via `module.string.LABEL.exports.EXPORT_LABEL`. Values in `exports` correspond to [export blocks][] defined in the module source. -[export blocks]: {{< relref "../config-blocks/export.md" >}} +[export blocks]: ../../config-blocks/export/ ## Component health diff --git a/docs/sources/flow/reference/components/otelcol.auth.basic.md b/docs/sources/reference/components/otelcol.auth.basic.md similarity index 69% rename from docs/sources/flow/reference/components/otelcol.auth.basic.md rename to docs/sources/reference/components/otelcol.auth.basic.md index 885eb53f09..97dbaf0e08 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.basic.md +++ b/docs/sources/reference/components/otelcol.auth.basic.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.basic/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.basic/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.basic/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.basic/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.basic/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.auth.basic/ description: Learn about otelcol.auth.basic title: otelcol.auth.basic --- @@ -36,17 +31,17 @@ otelcol.auth.basic "LABEL" { `otelcol.auth.basic` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`username` | `string` | Username to use for basic authentication requests. | | yes -`password` | `secret` | Password to use for basic authentication requests. | | yes +Name | Type | Description | Default | Required +-----------|----------|----------------------------------------------------|---------|--------- +`username` | `string` | Username to use for basic authentication requests. | | yes +`password` | `secret` | Password to use for basic authentication requests. | | yes ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|----------------------------|---------------------------------------------------------------- `handler` | `capsule(otelcol.Handler)` | A value that other components can use to authenticate requests. 
## Component health @@ -76,4 +71,4 @@ otelcol.auth.basic "creds" { } ``` -[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} +[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/ diff --git a/docs/sources/flow/reference/components/otelcol.auth.bearer.md b/docs/sources/reference/components/otelcol.auth.bearer.md similarity index 62% rename from docs/sources/flow/reference/components/otelcol.auth.bearer.md rename to docs/sources/reference/components/otelcol.auth.bearer.md index 718789603b..1bdcea1885 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.bearer.md +++ b/docs/sources/reference/components/otelcol.auth.bearer.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.bearer/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.bearer/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.bearer/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.bearer/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.bearer/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.auth.bearer/ description: Learn about otelcol.auth.bearer title: otelcol.auth.bearer --- @@ -16,12 +11,12 @@ components to authenticate requests using bearer token authentication. This extension supports both server and client authentication. -> **NOTE**: `otelcol.auth.bearer` is a wrapper over the upstream OpenTelemetry -> Collector `bearertokenauth` extension. Bug reports or feature requests will -> be redirected to the upstream repository, if necessary. +{{< admonition type="note" >}} +`otelcol.auth.bearer` is a wrapper over the upstream OpenTelemetry Collector `bearertokenauth` extension. +Bug reports or feature requests will be redirected to the upstream repository, if necessary. +{{< /admonition >}} -Multiple `otelcol.auth.bearer` components can be specified by giving them -different labels. +Multiple `otelcol.auth.bearer` components can be specified by giving them different labels. ## Usage @@ -35,10 +30,10 @@ otelcol.auth.bearer "LABEL" { `otelcol.auth.bearer` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`token` | `secret` | Bearer token to use for authenticating requests. | | yes -`scheme` | `string` | Authentication scheme name. | "Bearer" | no +Name | Type | Description | Default | Required +---------|----------|--------------------------------------------------|----------|--------- +`token` | `secret` | Bearer token to use for authenticating requests. | | yes +`scheme` | `string` | Authentication scheme name. | "Bearer" | no When sending the token, the value of `scheme` is prepended to the `token` value. The string is then sent out as either a header (in case of HTTP) or as metadata (in case of gRPC). @@ -47,8 +42,8 @@ The string is then sent out as either a header (in case of HTTP) or as metadata The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|----------------------------|---------------------------------------------------------------- `handler` | `capsule(otelcol.Handler)` | A value that other components can use to authenticate requests. ## Component health @@ -66,8 +61,7 @@ configuration. 
The example below configures [otelcol.exporter.otlp][] to use bearer token authentication.

-If we assume that the value of the `API_KEY` environment variable is `SECRET_API_KEY`, then
-the `Authorization` RPC metadata is set to `Bearer SECRET_API_KEY`.
+If we assume that the value of the `API_KEY` environment variable is `SECRET_API_KEY`, then the `Authorization` RPC metadata is set to `Bearer SECRET_API_KEY`.

```river
otelcol.exporter.otlp "example" {
@@ -103,5 +97,5 @@ otelcol.auth.bearer "creds" {
}
```

-[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}}
-[otelcol.exporter.otlphttp]: {{< relref "./otelcol.exporter.otlphttp.md" >}}
+[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/
+[otelcol.exporter.otlphttp]: ../otelcol.exporter.otlphttp/
diff --git a/docs/sources/flow/reference/components/otelcol.auth.headers.md b/docs/sources/reference/components/otelcol.auth.headers.md
similarity index 71%
rename from docs/sources/flow/reference/components/otelcol.auth.headers.md
rename to docs/sources/reference/components/otelcol.auth.headers.md
index 6b70a021de..734b24c992 100644
--- a/docs/sources/flow/reference/components/otelcol.auth.headers.md
+++ b/docs/sources/reference/components/otelcol.auth.headers.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.headers/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.headers/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.headers/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.headers/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.headers/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.auth.headers/
description: Learn about otelcol.auth.headers
title: otelcol.auth.headers
---
@@ -14,12 +9,12 @@ title: otelcol.auth.headers

# otelcol.auth.headers

`otelcol.auth.headers` exposes a `handler` that can be used by other `otelcol`
components to authenticate requests using custom headers.

-> **NOTE**: `otelcol.auth.headers` is a wrapper over the upstream OpenTelemetry
-> Collector `headerssetter` extension. Bug reports or feature requests will be
-> redirected to the upstream repository, if necessary.
+{{< admonition type="note" >}}
+`otelcol.auth.headers` is a wrapper over the upstream OpenTelemetry Collector `headerssetter` extension.
+Bug reports or feature requests will be redirected to the upstream repository, if necessary.
+{{< /admonition >}}

-Multiple `otelcol.auth.headers` components can be specified by giving them
-different labels.
+Multiple `otelcol.auth.headers` components can be specified by giving them different labels.

## Usage

@@ -42,9 +37,9 @@ through inner blocks.

The following blocks are supported inside the definition of
`otelcol.auth.headers`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-header | [header][] | Custom header to attach to requests. | no
+Hierarchy | Block      | Description                          | Required
+----------|------------|--------------------------------------|---------
+header    | [header][] | Custom header to attach to requests. | no

[header]: #header-block

@@ -53,12 +48,12 @@

The `header` block defines a custom header to attach to requests. It is valid
to provide multiple `header` blocks to set more than one header.
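+For example, a sketch that sets two headers (the header names and values here are purely illustrative):
+
+```river
+otelcol.auth.headers "multi" {
+  header {
+    // Hypothetical tenant header; adjust to your environment.
+    key   = "X-Scope-OrgID"
+    value = "tenant-a"
+  }
+
+  header {
+    // A second header, showing that multiple header blocks are allowed.
+    key   = "X-Environment"
+    value = "production"
+  }
+}
+```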
-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`key` | `string` | Name of the header to set. | | yes
-`value` | `string` or `secret` | Value of the header. | | no
-`from_context` | `string` | Metadata name to get header value from. | | no
-`action` | `string` | An action to perform on the header | "upsert" | no
+Name | Type | Description | Default | Required
+---------------|----------------------|------------------------------------------|----------|---------
+`key` | `string` | Name of the header to set. | | yes
+`value` | `string` or `secret` | Value of the header. | | no
+`from_context` | `string` | Metadata name to get header value from. | | no
+`action` | `string` | An action to perform on the header. | "upsert" | no

The supported values for `action` are:
* `insert`: Inserts the new header if it does not exist.
@@ -145,4 +140,4 @@ otelcol.exporter.otlp "production" {
}
```

-[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}}
+[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/
diff --git a/docs/sources/flow/reference/components/otelcol.auth.oauth2.md b/docs/sources/reference/components/otelcol.auth.oauth2.md
similarity index 63%
rename from docs/sources/flow/reference/components/otelcol.auth.oauth2.md
rename to docs/sources/reference/components/otelcol.auth.oauth2.md
index 4584f47eb7..28e7cc8e20 100644
--- a/docs/sources/flow/reference/components/otelcol.auth.oauth2.md
+++ b/docs/sources/reference/components/otelcol.auth.oauth2.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.oauth2/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.oauth2/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.oauth2/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.oauth2/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.oauth2/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.auth.oauth2/
description: Learn about otelcol.auth.oauth2
title: otelcol.auth.oauth2
---
@@ -37,14 +32,14 @@ otelcol.auth.oauth2 "LABEL" {

## Arguments

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`client_id` | `string` | The client identifier issued to the client. | | yes
-`client_secret` | `secret` | The secret string associated with the client identifier. | | yes
-`token_url` | `string` | The server endpoint URL from which to get tokens. | | yes
-`endpoint_params` | `map(list(string))` | Additional parameters that are sent to the token endpoint. | `{}` | no
-`scopes` | `list(string)` | Requested permissions associated for the client. | `[]` | no
-`timeout` | `duration` | The timeout on the client connecting to `token_url`. | `"0s"` | no
+Name | Type | Description | Default | Required
+------------------|---------------------|------------------------------------------------------------|---------|---------
+`client_id` | `string` | The client identifier issued to the client. | | yes
+`client_secret` | `secret` | The secret string associated with the client identifier. | | yes
+`token_url` | `string` | The server endpoint URL from which to get tokens. | | yes
+`endpoint_params` | `map(list(string))` | Additional parameters that are sent to the token endpoint. | `{}` | no
+`scopes` | `list(string)` | Requested permissions associated with the client.
| `[]` | no +`timeout` | `duration` | The timeout on the client connecting to `token_url`. | `"0s"` | no The `timeout` argument is used both for requesting initial tokens and for refreshing tokens. `"0s"` implies no timeout. @@ -53,25 +48,24 @@ The `timeout` argument is used both for requesting initial tokens and for refres The following blocks are supported inside the definition of `otelcol.auth.oauth2`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -tls | [tls][] | TLS settings for the token client. | no +Hierarchy | Block | Description | Required +----------|---------|------------------------------------|--------- +tls | [tls][] | TLS settings for the token client. | no [tls]: #tls-block ### tls block -The `tls` block configures TLS settings used for connecting to the token client. If the `tls` block isn't provided, -TLS won't be used for communication. +The `tls` block configures TLS settings used for connecting to the token client. If the `tls` block isn't provided, TLS won't be used for communication. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|----------------------------|---------------------------------------------------------------- `handler` | `capsule(otelcol.Handler)` | A value that other components can use to authenticate requests. ## Component health @@ -121,4 +115,4 @@ otelcol.auth.oauth2 "creds" { } ``` -[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} +[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/ diff --git a/docs/sources/flow/reference/components/otelcol.auth.sigv4.md b/docs/sources/reference/components/otelcol.auth.sigv4.md similarity index 64% rename from docs/sources/flow/reference/components/otelcol.auth.sigv4.md rename to docs/sources/reference/components/otelcol.auth.sigv4.md index e4fc91df28..8ac55e2918 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.sigv4.md +++ b/docs/sources/reference/components/otelcol.auth.sigv4.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.auth.sigv4/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.auth.sigv4/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.auth.sigv4/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.sigv4/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.auth.sigv4/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.auth.sigv4/ description: Learn about otelcol.auth.sigv4 title: otelcol.auth.sigv4 --- @@ -12,9 +7,10 @@ title: otelcol.auth.sigv4 # otelcol.auth.sigv4 `otelcol.auth.sigv4` exposes a `handler` that can be used by other `otelcol` -components to authenticate requests to AWS services using the AWS Signature Version 4 (SigV4) protocol. -For more information about SigV4 see the AWS documentation about -[Signing AWS API requests](https://docs.aws.amazon.com/general/latest/gr/signing-aws-api-requests.html) . +components to authenticate requests to AWS services using the AWS Signature Version 4 (SigV4) protocol. 
+For more information about SigV4, see the AWS documentation about [Signing AWS API requests][].
+
+[Signing AWS API requests]: https://docs.aws.amazon.com/general/latest/gr/signing-aws-api-requests.html

> **NOTE**: `otelcol.auth.sigv4` is a wrapper over the upstream OpenTelemetry
> Collector `sigv4auth` extension. Bug reports or feature requests will be
@@ -23,8 +19,11 @@ For more information about SigV4 see the AWS documentation about

Multiple `otelcol.auth.sigv4` components can be specified by giving them
different labels.

-> **NOTE**: The Agent must have valid AWS credentials as used by the
-[AWS SDK for Go](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials).
+{{< admonition type="note" >}}
+{{< param "PRODUCT_NAME" >}} must have valid AWS credentials as used by the [AWS SDK for Go][].
+
+[AWS SDK for Go]: https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials
+{{< /admonition >}}

## Usage

```river
otelcol.auth.sigv4 "LABEL" {
}
```

## Arguments

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`region` | `string` | The AWS region to sign with. | "" | no
-`service` | `string` | The AWS service to sign with. | "" | no
+Name | Type | Description | Default | Required
+----------|----------|-------------------------------|---------|---------
+`region` | `string` | The AWS region to sign with. | "" | no
+`service` | `string` | The AWS service to sign with. | "" | no

If `region` and `service` are left empty, their values are inferred from the URL of the exporter using the following rules:

* If the exporter URL starts with `aps-workspaces` and `service` is empty, `service` will be set to `aps`.
* If the exporter URL starts with `search-` and `service` is empty, `service` will be set to `es`.
-* If the exporter URL starts with either `aps-workspaces` or `search-` and `region` is empty, `region` .
-will be set to the value between the first and second `.` character in the exporter URL.
+* If the exporter URL starts with either `aps-workspaces` or `search-` and `region` is empty, `region` will be set to the value between the first and second `.` character in the exporter URL.

If none of the above rules apply, then `region` and `service` must be specified.

-A list of valid AWS regions can be found on Amazon's documentation for
-[Regions, Availability Zones, and Local Zones](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html).
+A list of valid AWS regions can be found in Amazon's documentation for [Regions, Availability Zones, and Local Zones][].
+
+[Regions, Availability Zones, and Local Zones]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html

## Blocks

The following blocks are supported inside the definition of
`otelcol.auth.sigv4`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
+Hierarchy   | Block           | Description                        | Required
+------------|-----------------|------------------------------------|---------
assume_role | [assume_role][] | Configuration for assuming a role. | no

[assume_role]: #assume_role-block

### assume_role block

The `assume_role` block specifies the configuration needed to assume a role.

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`arn` | `string` | The Amazon Resource Name (ARN) of a role to assume.
| "" | no -`session_name` | `string` | The name of a role session. | "" | no -`sts_region` | `string` | The AWS region where STS is used to assume the configured role. | "" | no +Name | Type | Description | Default | Required +---------------|----------|-----------------------------------------------------------------|---------|--------- +`arn` | `string` | The Amazon Resource Name (ARN) of a role to assume. | "" | no +`session_name` | `string` | The name of a role session. | "" | no +`sts_region` | `string` | The AWS region where STS is used to assume the configured role. | "" | no -If the `assume_role` block is specified in the config and `sts_region` is not set, then `sts_region` -will default to the value for `region`. +If the `assume_role` block is specified in the config and `sts_region` is not set, then `sts_region` will default to the value for `region`. For cross region authentication, `region` and `sts_region` can be set different to different values. @@ -83,8 +81,8 @@ For cross region authentication, `region` and `sts_region` can be set different The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +----------|----------------------------|---------------------------------------------------------------- `handler` | `capsule(otelcol.Handler)` | A value that other components can use to authenticate requests. ## Component health @@ -153,8 +151,7 @@ otelcol.auth.sigv4 "creds" { ### Specifying "region" and "service" explicitly and adding a "role" to assume -In this example we have also specified configuration to assume a role. `sts_region` has not been -provided, so it will default to the value of `region` which is `example_region`. +In this example we have also specified configuration to assume a role. `sts_region` hasn't been provided, so it will default to the value of `region` which is `example_region`. 
```river
otelcol.exporter.otlp "example" {
@@ -167,7 +164,7 @@ otelcol.exporter.otlp "example" {
otelcol.auth.sigv4 "creds" {
  region  = "example_region"
  service = "example_service"
-  
+
  assume_role {
    session_name = "role_session_name"
  }
diff --git a/docs/sources/flow/reference/components/otelcol.connector.host_info.md b/docs/sources/reference/components/otelcol.connector.host_info.md
similarity index 87%
rename from docs/sources/flow/reference/components/otelcol.connector.host_info.md
rename to docs/sources/reference/components/otelcol.connector.host_info.md
index 53d8a1663a..81c4bf1a0e 100644
--- a/docs/sources/flow/reference/components/otelcol.connector.host_info.md
+++ b/docs/sources/reference/components/otelcol.connector.host_info.md
@@ -1,8 +1,5 @@
---
-aliases:
-  - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.host_info/
-  - /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.host_info/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.host_info/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.connector.host_info/
description: Learn about otelcol.connector.host_info
labels:
  stage: experimental
@@ -11,7 +8,7 @@ title: otelcol.connector.host_info

# otelcol.connector.host_info

-{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}}

`otelcol.connector.host_info` accepts span data from other `otelcol` components
and generates usage metrics.
@@ -47,7 +44,7 @@ The following blocks are supported inside the definition of

### output block

-{{< docs/shared lookup="flow/reference/components/output-block-metrics.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/output-block-metrics.md" source="alloy" version="" >}}

## Exported fields

diff --git a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md b/docs/sources/reference/components/otelcol.connector.servicegraph.md
similarity index 74%
rename from docs/sources/flow/reference/components/otelcol.connector.servicegraph.md
rename to docs/sources/reference/components/otelcol.connector.servicegraph.md
index 06f20833f0..48f9b0e39d 100644
--- a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md
+++ b/docs/sources/reference/components/otelcol.connector.servicegraph.md
@@ -1,8 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.servicegraph/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.servicegraph/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.servicegraph/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.connector.servicegraph/
description: Learn about otelcol.connector.servicegraph
labels:
  stage: experimental
@@ -11,14 +8,13 @@ title: otelcol.connector.servicegraph

# otelcol.connector.servicegraph

-{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}}

-`otelcol.connector.servicegraph` accepts span data from other `otelcol` components and
-outputs metrics representing the relationship between various services in a system.
+`otelcol.connector.servicegraph` accepts span data from other `otelcol` components and outputs metrics representing the relationship between various services in a system. A metric represents an edge in the service graph. -Those metrics can then be used by a data visualization application (e.g. -[Grafana](/docs/grafana/latest/explore/trace-integration/#service-graph)) -to draw the service graph. +Those metrics can then be used by a data visualization application (e.g. [Grafana][]) to draw the service graph. + +[Grafana]: https://grafana.com/docs/grafana/latest/explore/trace-integration/#service-graph > **NOTE**: `otelcol.connector.servicegraph` is a wrapper over the upstream > OpenTelemetry Collector `servicegraph` connector. Bug reports or feature requests @@ -31,7 +27,7 @@ This component is based on [Grafana Tempo's service graph processor](https://git Service graphs are useful for a number of use-cases: -* Infer the topology of a distributed system. As distributed systems grow, they become more complex. +* Infer the topology of a distributed system. As distributed systems grow, they become more complex. Service graphs can help you understand the structure of the system. * Provide a high level overview of the health of your system. Service graphs show error rates, latencies, and other relevant data. @@ -42,9 +38,11 @@ Service graphs are useful for a number of use-cases: Since `otelcol.connector.servicegraph` has to process both sides of an edge, it needs to process all spans of a trace to function properly. If spans of a trace are spread out over multiple Agent instances, spans cannot be paired reliably. -A solution to this problem is using [otelcol.exporter.loadbalancing]({{< relref "./otelcol.exporter.loadbalancing.md" >}}) +A solution to this problem is using [otelcol.exporter.loadbalancing][] in front of Agent instances running `otelcol.connector.servicegraph`. +[otelcol.exporter.loadbalancing]: ../otelcol.exporter.loadbalancing/ + ## Usage ```river @@ -59,28 +57,26 @@ otelcol.connector.servicegraph "LABEL" { `otelcol.connector.servicegraph` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`latency_histogram_buckets` | `list(duration)` | Buckets for latency histogram metrics. | `["2ms", "4ms", "6ms", "8ms", "10ms", "50ms", "100ms", "200ms", "400ms", "800ms", "1s", "1400ms", "2s", "5s", "10s", "15s"]` | no -`dimensions` | `list(string)` | A list of dimensions to add with the default dimensions. | `[]` | no -`cache_loop` | `duration` | Configures how often to delete series which have not been updated. | `"1m"` | no -`store_expiration_loop` | `duration` | The time to expire old entries from the store periodically. | `"2s"` | no - -Service graphs work by inspecting traces and looking for spans with -parent-children relationship that represent a request. -`otelcol.connector.servicegraph` uses OpenTelemetry semantic conventions -to detect a myriad of requests. +Name | Type | Description | Default | Required +----------------------------|------------------|--------------------------------------------------------------------|---------|--------- +`latency_histogram_buckets` | `list(duration)` | Buckets for latency histogram metrics. | `["2ms", "4ms", "6ms", "8ms", "10ms", "50ms", "100ms", "200ms", "400ms", "800ms", "1s", "1400ms", "2s", "5s", "10s", "15s"]` | no +`dimensions` | `list(string)` | A list of dimensions to add with the default dimensions. 
| `[]` | no
+`cache_loop` | `duration` | Configures how often to delete series which have not been updated. | `"1m"` | no
+`store_expiration_loop` | `duration` | The time to expire old entries from the store periodically. | `"2s"` | no
+
+Service graphs work by inspecting traces and looking for spans with a parent-child relationship that represent a request.
+`otelcol.connector.servicegraph` uses OpenTelemetry semantic conventions to detect a myriad of requests.
The following requests are currently supported:

* A direct request between two services, where the outgoing and the incoming span
  must have a [Span Kind][] value of `client` and `server` respectively.
-* A request across a messaging system, where the outgoing and the incoming span
+* A request across a messaging system, where the outgoing and the incoming span
  must have a [Span Kind][] value of `producer` and `consumer` respectively.
* A database request, where spans have a [Span Kind][] with a value of `client`,
  as well as an attribute with a key of `db.name`.

Every span which can be paired up to form a request is kept in an in-memory store:

-* If the TTL of the span expires before it can be paired, it is deleted from the store.
+* If the TTL of the span expires before it can be paired, it is deleted from the store.
  TTL is configured in the [store][] block.
* If the span is paired prior to its expiration, a metric is recorded and the span is deleted from the store.

@@ -97,12 +93,11 @@ The following metrics are emitted by the processor:

Duration is measured both from the client and the server sides.

-The `latency_histogram_buckets` argument controls the buckets for
+The `latency_histogram_buckets` argument controls the buckets for
`traces_service_graph_request_server_seconds` and `traces_service_graph_request_client_seconds`.

-Each emitted metrics series have a `client` and a `server` label corresponding with the
-service doing the request and the service receiving the request. The value of the label
-is derived from the `service.name` resource attribute of the two spans.
+Each emitted metric series has a `client` and a `server` label corresponding to the service making the request and the service receiving the request.
+The value of the label is derived from the `service.name` resource attribute of the two spans.

The `connection_type` label may not be set. If it is set, its value will be
either `messaging_system` or `database`.

@@ -110,8 +105,7 @@ Additional labels can be included using the `dimensions` configuration option:

* Those labels will have a prefix to mark where they originate (client or server span kinds).
  The `client_` prefix relates to the dimensions coming from spans with a [Span Kind][] of `client`.
  The `server_` prefix relates to the dimensions coming from spans with a [Span Kind][] of `server`.
-* Firstly the resource attributes will be searched. If the attribute is not found,
-  the span attributes will be searched.
+* First, the resource attributes will be searched. If the attribute is not found, the span attributes will be searched.

[Span Kind]: https://opentelemetry.io/docs/concepts/signals/traces/#span-kind

@@ -120,10 +114,10 @@

The following blocks are supported inside the definition of
`otelcol.connector.servicegraph`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-store | [store][] | Configures the in-memory store for spans.
| no -output | [output][] | Configures where to send telemetry data. | yes +Hierarchy | Block | Description | Required +----------|------------|-------------------------------------------|--------- +store | [store][] | Configures the in-memory store for spans. | no +output | [output][] | Configures where to send telemetry data. | yes [store]: #store-block [output]: #output-block @@ -132,21 +126,21 @@ output | [output][] | Configures where to send telemetry data. | yes The `store` block configures the in-memory store for spans. -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`max_items` | `number` | Maximum number of items to keep in the store. | `1000` | no -`ttl` | `duration` | The time to live for spans in the store. | `"2s"` | no +Name | Type | Description | Default | Required +------------|------------|-----------------------------------------------|---------|--------- +`max_items` | `number` | Maximum number of items to keep in the store. | `1000` | no +`ttl` | `duration` | The time to live for spans in the store. | `"2s"` | no ### output block -{{< docs/shared lookup="flow/reference/components/output-block-metrics.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block-metrics.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` traces telemetry data. It does not accept metrics and logs. @@ -166,15 +160,14 @@ information. The example below accepts traces, creates service graph metrics from them, and writes the metrics to Mimir. The traces are written to Tempo. -`otelcol.connector.servicegraph` also adds a label to each metric with the value of the "http.method" -span/resource attribute. +`otelcol.connector.servicegraph` also adds a label to each metric with the value of the "http.method" span/resource attribute. 
```river otelcol.receiver.otlp "default" { grpc { endpoint = "0.0.0.0:4320" } - + output { traces = [otelcol.connector.servicegraph.default.input,otelcol.exporter.otlp.grafana_cloud_tempo.input] } @@ -194,7 +187,7 @@ otelcol.exporter.prometheus "default" { prometheus.remote_write "mimir" { endpoint { url = "https://prometheus-xxx.grafana.net/api/prom/push" - + basic_auth { username = env("PROMETHEUS_USERNAME") password = env("GRAFANA_CLOUD_API_KEY") @@ -216,10 +209,13 @@ otelcol.auth.basic "grafana_cloud_tempo" { ``` Some of the metrics in Mimir may look like this: + ``` traces_service_graph_request_total{client="shop-backend",failed="false",server="article-service",client_http_method="DELETE",server_http_method="DELETE"} traces_service_graph_request_failed_total{client="shop-backend",client_http_method="POST",failed="false",server="auth-service",server_http_method="POST"} -``` +``` + + ## Compatible components diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md b/docs/sources/reference/components/otelcol.connector.spanlogs.md similarity index 88% rename from docs/sources/flow/reference/components/otelcol.connector.spanlogs.md rename to docs/sources/reference/components/otelcol.connector.spanlogs.md index ec49e0509c..266bdd778e 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md +++ b/docs/sources/reference/components/otelcol.connector.spanlogs.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.connector.spanlogs/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.spanlogs/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.connector.spanlogs/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.spanlogs/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.spanlogs/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.connector.spanlogs/ description: Learn about otelcol.connector.spanlogs title: otelcol.connector.spanlogs --- @@ -15,12 +10,14 @@ title: otelcol.connector.spanlogs components and outputs logs telemetry data for each span, root, or process. This allows you to automatically build a mechanism for trace discovery. -> **NOTE**: `otelcol.connector.spanlogs` is a custom component unrelated -> to any components from the OpenTelemetry Collector. It is based on the -> `automatic_logging` component in the [traces]({{< relref "../../../static/configuration/traces-config" >}}) subsystem of the Agent static mode. +{{< admonition type="note" >}} +`otelcol.connector.spanlogs` is a custom component unrelated to any components from the OpenTelemetry Collector. +It is based on the `automatic_logging` component in the [traces][] subsystem of Grafana Agent Static. + +[traces]: https://grafana.com/docs/agent/latest/static/configuration/traces-config +{{< /admonition >}} -You can specify multiple `otelcol.connector.spanlogs` components by giving them -different labels. +You can specify multiple `otelcol.connector.spanlogs` components by giving them different labels. ## Usage @@ -47,7 +44,9 @@ otelcol.connector.spanlogs "LABEL" { The values listed in `labels` should be the values of either span or process attributes. -> **WARNING**: Setting `spans` to `true` could lead to a high volume of logs. +{{< admonition type="warning" >}} +Setting `spans` to `true` could lead to a high volume of logs. 
+{{< /admonition >}} ## Blocks @@ -79,7 +78,7 @@ The following attributes are supported: ### output block -{{< docs/shared lookup="flow/reference/components/output-block-logs.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block-logs.md" source="alloy" version="" >}} ## Exported fields @@ -89,18 +88,15 @@ The following fields are exported and can be referenced by other components: | ------- | ------------------ | ---------------------------------------------------------------- | | `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. | -`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, -logs, or traces). +`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, logs, or traces). ## Component health -`otelcol.connector.spanlogs` is only reported as unhealthy if given an invalid -configuration. +`otelcol.connector.spanlogs` is only reported as unhealthy if given an invalid configuration. ## Debug information -`otelcol.connector.spanlogs` does not expose any component-specific debug -information. +`otelcol.connector.spanlogs` does not expose any component-specific debug information. ## Example diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md b/docs/sources/reference/components/otelcol.connector.spanmetrics.md similarity index 96% rename from docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md rename to docs/sources/reference/components/otelcol.connector.spanmetrics.md index ffc5f408cc..d010475c90 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md +++ b/docs/sources/reference/components/otelcol.connector.spanmetrics.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.connector.spanmetrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.connector.spanmetrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.connector.spanmetrics/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.connector.spanmetrics/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.connector.spanmetrics/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.connector.spanmetrics/ description: Learn about otelcol.connector.spanmetrics labels: stage: experimental @@ -13,7 +8,7 @@ title: otelcol.connector.spanmetrics # otelcol.connector.spanmetrics -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `otelcol.connector.spanmetrics` accepts span data from other `otelcol` components and aggregates Request, Error and Duration (R.E.D) OpenTelemetry metrics from the spans: @@ -171,8 +166,8 @@ The `explicit` block configures a histogram with explicit buckets. The following attributes are supported: -| Name | Type | Description | Default | Required | -| --------- | ---------------- | -------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | -------- | +| Name | Type | Description | Default | Required | +| --------- | ---------------- | -------------------------- | ------------------------------------------ | -------- | | `buckets` | `list(duration)` | List of histogram buckets. 
| `["2ms", "4ms", "6ms", "8ms", "10ms", "50ms", "100ms", "200ms", "400ms", "800ms", "1s", "1400ms", "2s", "5s", "10s", "15s"]` | no | ### exemplars block @@ -187,7 +182,7 @@ The following attributes are supported: ### output block -{{< docs/shared lookup="flow/reference/components/output-block-metrics.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block-metrics.md" source="alloy" version="" >}} ## Exported fields @@ -626,6 +621,7 @@ but different resource attributes, `otelcol.exporter.prometheus` will convert th This problem can be solved by doing **either** of the following: - **Recommended approach:** Prior to `otelcol.connector.spanmetrics`, remove all resource attributes from the incoming spans which are not needed by `otelcol.connector.spanmetrics`. + {{< collapse title="Example River configuration to remove unnecessary resource attributes." >}} ```river otelcol.receiver.otlp "default" { diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md b/docs/sources/reference/components/otelcol.exporter.loadbalancing.md similarity index 79% rename from docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md rename to docs/sources/reference/components/otelcol.exporter.loadbalancing.md index f25e28bfa3..58595b58e1 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md +++ b/docs/sources/reference/components/otelcol.exporter.loadbalancing.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.loadbalancing/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.loadbalancing/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.loadbalancing/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.loadbalancing/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.loadbalancing/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.loadbalancing/ description: Learn about otelcol.exporter.loadbalancing labels: stage: beta @@ -13,29 +8,30 @@ title: otelcol.exporter.loadbalancing # otelcol.exporter.loadbalancing -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `otelcol.exporter.loadbalancing` accepts logs and traces from other `otelcol` components -and writes them over the network using the OpenTelemetry Protocol (OTLP) protocol. +and writes them over the network using the OpenTelemetry Protocol (OTLP) protocol. -> **NOTE**: `otelcol.exporter.loadbalancing` is a wrapper over the upstream -> OpenTelemetry Collector `loadbalancing` exporter. Bug reports or feature requests will -> be redirected to the upstream repository, if necessary. +{{< admonition type="note" >}} +`otelcol.exporter.loadbalancing` is a wrapper over the upstream OpenTelemetry Collector `loadbalancing` exporter. +Bug reports or feature requests will be redirected to the upstream repository, if necessary. +{{< /admonition >}} Multiple `otelcol.exporter.loadbalancing` components can be specified by giving them different labels. -The decision which backend to use depends on the trace ID or the service name. -The backend load doesn't influence the choice. 
Even though this load-balancer won't do
-round-robin balancing of the batches, the load distribution should be very similar among backends,
+The decision about which backend to use depends on the trace ID or the service name.
+The backend load doesn't influence the choice. Even though this load-balancer won't do
+round-robin balancing of the batches, the load distribution should be very similar among backends,
with a standard deviation under 5% at the current configuration.

`otelcol.exporter.loadbalancing` is especially useful for backends configured with tail-based samplers,
which choose a backend based on the view of the full trace.

-When a list of backends is updated, some of the signals will be rerouted to different backends.
+When a list of backends is updated, some of the signals will be rerouted to different backends. 
Around R/N of the "routes" will be rerouted differently, where:

* A "route" is either a trace ID or a service name mapped to a certain backend.

@@ -63,13 +59,13 @@ otelcol.exporter.loadbalancing "LABEL" {
}
```

`otelcol.exporter.loadbalancing` supports the following arguments:

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
+Name | Type | Description | Default | Required
+--------------|----------|--------------------------------------|-------------|---------
`routing_key` | `string` | Routing strategy for load balancing. | `"traceID"` | no

The `routing_key` attribute determines how to route signals across endpoints. Its value could be one of the following:

* `"service"`: spans with the same `service.name` will be exported to the same backend.
This is useful when using processors like the span metrics, so all spans for each service are sent to consistent Agent instances
for metric collection. Otherwise, metrics for the same services would be sent to different Agents, making aggregations inaccurate.
* `"traceID"`: spans belonging to the same traceID will be exported to the same backend.

@@ -78,20 +74,20 @@

The following blocks are supported inside the definition of
`otelcol.exporter.loadbalancing`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-resolver | [resolver][] | Configures discovering the endpoints to export to. | yes
-resolver > static | [static][] | Static list of endpoints to export to. | no
-resolver > dns | [dns][] | DNS-sourced list of endpoints to export to. | no
-resolver > kubernetes | [kubernetes][] | Kubernetes-sourced list of endpoints to export to. | no
-protocol | [protocol][] | Protocol settings. Only OTLP is supported at the moment. | no
-protocol > otlp | [otlp][] | Configures an OTLP exporter. | no
-protocol > otlp > client | [client][] | Configures the exporter gRPC client. | no
-protocol > otlp > client > tls | [tls][] | Configures TLS for the gRPC client. | no
-protocol > otlp > client > keepalive | [keepalive][] | Configures keepalive settings for the gRPC client. | no
-protocol > otlp > queue | [queue][] | Configures batching of data before sending. | no
-protocol > otlp > retry | [retry][] | Configures retry mechanism for failed requests. | no
-debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state.
| no +Hierarchy | Block | Description | Required +-------------------------------------|-------------------|----------------------------------------------------------------------------|--------- +resolver | [resolver][] | Configures discovering the endpoints to export to. | yes +resolver > static | [static][] | Static list of endpoints to export to. | no +resolver > dns | [dns][] | DNS-sourced list of endpoints to export to. | no +resolver > kubernetes | [kubernetes][] | Kubernetes-sourced list of endpoints to export to. | no +protocol | [protocol][] | Protocol settings. Only OTLP is supported at the moment. | no +protocol > otlp | [otlp][] | Configures an OTLP exporter. | no +protocol > otlp > client | [client][] | Configures the exporter gRPC client. | no +protocol > otlp > client > tls | [tls][] | Configures TLS for the gRPC client. | no +protocol > otlp > client > keepalive | [keepalive][] | Configures keepalive settings for the gRPC client. | no +protocol > otlp > queue | [queue][] | Configures batching of data before sending. | no +protocol > otlp > retry | [retry][] | Configures retry mechanism for failed requests. | no +debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no The `>` symbol indicates deeper levels of nesting. For example, `resolver > static` refers to a `static` block defined inside a `resolver` block. @@ -113,8 +109,8 @@ refers to a `static` block defined inside a `resolver` block. The `resolver` block configures how to retrieve the endpoint to which this exporter will send data. -Inside the `resolver` block, either the [dns][] block or the [static][] block -should be specified. If both `dns` and `static` are specified, `dns` takes precedence. +Inside the `resolver` block, either the [dns][] block or the [static][] block should be specified. +If both `dns` and `static` are specified, `dns` takes precedence. ### static block @@ -122,9 +118,9 @@ The `static` block configures a list of endpoints which this exporter will send The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`hostnames` | `list(string)` | List of endpoints to export to. | | yes +Name | Type | Description | Default | Required +------------|----------------|---------------------------------|---------|--------- +`hostnames` | `list(string)` | List of endpoints to export to. | | yes ### dns block @@ -134,11 +130,11 @@ as the endpoint to which to export data to. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`hostname` | `string` | DNS hostname to resolve. | | yes -`interval` | `duration` | Resolver interval. | `"5s"` | no -`timeout` | `duration` | Resolver timeout. | `"1s"` | no +Name | Type | Description | Default | Required +-----------|------------|-----------------------------------------------------------------------|----------|--------- +`hostname` | `string` | DNS hostname to resolve. | | yes +`interval` | `duration` | Resolver interval. | `"5s"` | no +`timeout` | `duration` | Resolver timeout. | `"1s"` | no `port` | `string` | Port to be used with the IP addresses resolved from the DNS hostname. 
| `"4317"` | no ### kubernetes block @@ -149,9 +145,9 @@ The `kubernetes` resolver has a much faster response time than the `dns` resolve The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`service` | `string` | Kubernetes service to resolve. | | yes +Name | Type | Description | Default | Required +----------|----------------|-------------------------------------------------------------|----------|--------- +`service` | `string` | Kubernetes service to resolve. | | yes `ports` | `list(number)` | Ports to use with the IP addresses resolved from `service`. | `[4317]` | no If no namespace is specified inside `service`, an attempt will be made to infer the namespace for this Agent. @@ -178,22 +174,22 @@ The endpoints used by the client block are the ones from the `resolver` block The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no -`read_buffer_size` | `string` | Size of the read buffer the gRPC client to use for reading server responses. | | no -`write_buffer_size` | `string` | Size of the write buffer the gRPC client to use for writing requests. | `"512KiB"` | no -`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. | `false` | no -`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no -`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no -`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no -`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no +Name | Type | Description | Default | Required +--------------------|----------------------------|----------------------------------------------------------------------------------|--------------|--------- +`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no +`read_buffer_size` | `string` | Size of the read buffer the gRPC client to use for reading server responses. | | no +`write_buffer_size` | `string` | Size of the write buffer the gRPC client to use for writing requests. | `"512KiB"` | no +`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. | `false` | no +`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no +`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no +`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no +`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. 
| | no -{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-compression-field.md" source="alloy" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-grpc-balancer-name.md" source="alloy" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-grpc-authority.md" source="alloy" version="" >}} You can configure an HTTP proxy with the following environment variables: @@ -219,10 +215,9 @@ able to handle and proxy HTTP/2 traffic. ### tls block -The `tls` block configures TLS settings used for the connection to the gRPC -server. +The `tls` block configures TLS settings used for the connection to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### keepalive block @@ -231,36 +226,34 @@ connections. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`ping_wait` | `duration` | How often to ping the server after no activity. | | no -`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no -`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. | | no +Name | Type | Description | Default | Required +------------------------|------------|--------------------------------------------------------------------------------------------|---------|--------- +`ping_wait` | `duration` | How often to ping the server after no activity. | | no +`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no +`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. | | no ### queue block -The `queue` block configures an in-memory buffer of batches before data is sent -to the gRPC server. +The `queue` block configures an in-memory buffer of batches before data is sent to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-queue-block.md" source="alloy" version="" >}} ### retry block -The `retry` block configures how failed requests to the gRPC server are -retried. +The `retry` block configures how failed requests to the gRPC server are retried. 
-{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-retry-block.md" source="alloy" version="" >}} ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` OTLP-formatted data for telemetry signals of these types: @@ -275,7 +268,7 @@ Name | Type | Description Different {{< param "PRODUCT_NAME" >}} components require different load-balancing strategies. The use of `otelcol.exporter.loadbalancing` is only necessary for [stateful Flow components][stateful-and-stateless-components]. -[stateful-and-stateless-components]: {{< relref "../../get-started/deploy-agent.md#stateful-and-stateless-components" >}} +[stateful-and-stateless-components]: ../../../get-started/deploy-alloy/#stateful-and-stateless-components ### otelcol.processor.tail_sampling diff --git a/docs/sources/flow/reference/components/otelcol.exporter.logging.md b/docs/sources/reference/components/otelcol.exporter.logging.md similarity index 70% rename from docs/sources/flow/reference/components/otelcol.exporter.logging.md rename to docs/sources/reference/components/otelcol.exporter.logging.md index 51a044b130..942f7e6bf0 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.logging.md +++ b/docs/sources/reference/components/otelcol.exporter.logging.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.logging/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.logging/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.logging/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.logging/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.logging/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.logging/ description: Learn about otelcol.exporter.logging title: otelcol.exporter.logging --- @@ -17,14 +12,15 @@ and writes them to the console. This component writes logs at the info level. The [logging config block][] must be configured to write logs at the info level. -> **NOTE**: `otelcol.exporter.logging` is a wrapper over the upstream -> OpenTelemetry Collector `logging` exporter. Bug reports or feature requests will -> be redirected to the upstream repository, if necessary. +{{< admonition type="note" >}} +`otelcol.exporter.logging` is a wrapper over the upstream OpenTelemetry Collector `logging` exporter. +Bug reports or feature requests will be redirected to the upstream repository, if necessary. +{{< /admonition >}} Multiple `otelcol.exporter.logging` components can be specified by giving them different labels. 
-[logging config block]: {{< relref "../config-blocks/logging.md" >}} +[logging config block]: ../../config-blocks/logging/ ## Usage @@ -36,11 +32,11 @@ otelcol.exporter.logging "LABEL" { } `otelcol.exporter.logging` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`verbosity` | `string` | Verbosity of the generated logs. | `"normal"` | no -`sampling_initial` | `int` | Number of messages initially logged each second. | `2` | no -`sampling_thereafter` | `int` | Sampling rate after the initial messages are logged. | `500` | no +Name | Type | Description | Default | Required +----------------------|----------|------------------------------------------------------|------------|--------- +`verbosity` | `string` | Verbosity of the generated logs. | `"normal"` | no +`sampling_initial` | `int` | Number of messages initially logged each second. | `2` | no +`sampling_thereafter` | `int` | Sampling rate after the initial messages are logged. | `500` | no The `verbosity` argument must be one of `"basic"`, `"normal"`, or `"detailed"`. @@ -49,8 +45,8 @@ The `verbosity` argument must be one of `"basic"`, `"normal"`, or `"detailed"`. The following blocks are supported inside the definition of `otelcol.exporter.logging`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- +Hierarchy | Block | Description | Required +--------------|-------------------|----------------------------------------------------------------------------|--------- debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no The `>` symbol indicates deeper levels of nesting. For example, `client > tls` @@ -60,14 +56,14 @@ refers to a `tls` block defined inside a `client` block. ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. 
`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loki.md b/docs/sources/reference/components/otelcol.exporter.loki.md similarity index 70% rename from docs/sources/flow/reference/components/otelcol.exporter.loki.md rename to docs/sources/reference/components/otelcol.exporter.loki.md index 8fe0d1ec83..f6e73c7b22 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loki.md +++ b/docs/sources/reference/components/otelcol.exporter.loki.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.loki/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.loki/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.loki/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.loki/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.loki/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.loki/ description: Learn about otelcol.exporter.loki title: otelcol.exporter.loki --- @@ -15,8 +10,9 @@ title: otelcol.exporter.loki components, converts them to Loki-formatted log entries, and forwards them to `loki` components. -> **NOTE**: `otelcol.exporter.loki` is a custom component unrelated to the -> `lokiexporter` from the OpenTelemetry Collector. +{{< admonition type="note" >}} +`otelcol.exporter.loki` is a custom component unrelated to the `lokiexporter` from the OpenTelemetry Collector. +{{< /admonition >}} The attributes of the OTLP log are not converted to Loki attributes by default. To convert them, the OTLP log should contain special "hint" attributes: @@ -25,12 +21,10 @@ To convert them, the OTLP log should contain special "hint" attributes: * To convert OTLP log attributes to Loki labels, use the `loki.attribute.labels` hint attribute. -Labels will be translated to a [Prometheus format][], which is more constrained -than the OTLP format. For examples on label translation, see the -[Converting OTLP attributes to Loki labels][] section. +Labels will be translated to a [Prometheus format][], which is more constrained than the OTLP format. +For examples on label translation, see the [Converting OTLP attributes to Loki labels][] section. -Multiple `otelcol.exporter.loki` components can be specified by giving them -different labels. +Multiple `otelcol.exporter.loki` components can be specified by giving them different labels. [Converting OTLP attributes to Loki labels]: #converting-otlp-attributes-to-loki-labels @@ -46,16 +40,16 @@ otelcol.exporter.loki "LABEL" { `otelcol.exporter.loki` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`forward_to` | `list(receiver)` | Where to forward converted Loki logs. | | yes +Name | Type | Description | Default | Required +-------------|------------------|---------------------------------------|---------|--------- +`forward_to` | `list(receiver)` | Where to forward converted Loki logs. 
| | yes ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for logs. Other telemetry signals are ignored. @@ -108,16 +102,16 @@ The example below will convert the following attributes to Loki labels: Labels will be translated to a [Prometheus format][]. For example: -| OpenTelemetry Attribute | Prometheus Label | -|---|---| -| `name` | `name` | -| `host.name` | `host_name` | -| `host_name` | `host_name` | -| `name (of the host)` | `name__of_the_host_` | -| `2 cents` | `key_2_cents` | -| `__name` | `__name` | -| `_name` | `key_name` | -| `_name` | `_name` (if `PermissiveLabelSanitization` is enabled) | +| OpenTelemetry Attribute | Prometheus Label | +|-------------------------|-------------------------------------------------------| +| `name` | `name` | +| `host.name` | `host_name` | +| `host_name` | `host_name` | +| `name (of the host)` | `name__of_the_host_` | +| `2 cents` | `key_2_cents` | +| `__name` | `__name` | +| `_name` | `key_name` | +| `_name` | `_name` (if `PermissiveLabelSanitization` is enabled) | ```river otelcol.receiver.otlp "default" { @@ -134,13 +128,13 @@ otelcol.processor.attributes "default" { action = "insert" value = "event.domain, event.name" } - + action { key = "loki.resource.labels" action = "insert" value = "service.name, service.namespace" } - + output { logs = [otelcol.exporter.loki.default.input] } diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md b/docs/sources/reference/components/otelcol.exporter.otlp.md similarity index 51% rename from docs/sources/flow/reference/components/otelcol.exporter.otlp.md rename to docs/sources/reference/components/otelcol.exporter.otlp.md index fce2576d8e..6230d97b35 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md +++ b/docs/sources/reference/components/otelcol.exporter.otlp.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.otlp/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.otlp/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.otlp/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlp/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.otlp/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.otlp/ description: Learn about otelcol.exporter.otlp title: otelcol.exporter.otlp --- @@ -35,23 +30,23 @@ otelcol.exporter.otlp "LABEL" { `otelcol.exporter.otlp` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`timeout` | `duration` | Time to wait before marking a request as failed. | `"5s"` | no +Name | Type | Description | Default | Required +----------|------------|--------------------------------------------------|---------|--------- +`timeout` | `duration` | Time to wait before marking a request as failed. 
| `"5s"` | no

## Blocks

The following blocks are supported inside the definition of
`otelcol.exporter.otlp`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-client | [client][] | Configures the gRPC server to send telemetry data to. | yes
-client > tls | [tls][] | Configures TLS for the gRPC client. | no
-client > keepalive | [keepalive][] | Configures keepalive settings for the gRPC client. | no
-sending_queue | [sending_queue][] | Configures batching of data before sending. | no
-retry_on_failure | [retry_on_failure][] | Configures retry mechanism for failed requests. | no
-debug_metrics | [debug_metrics][] | Configures the metrics that this component generates to monitor its state. | no
+Hierarchy          | Block                | Description                                                                  | Required
+-------------------|----------------------|------------------------------------------------------------------------------|---------
+client             | [client][]           | Configures the gRPC server to send telemetry data to.                        | yes
+client > tls       | [tls][]              | Configures TLS for the gRPC client.                                          | no
+client > keepalive | [keepalive][]        | Configures keepalive settings for the gRPC client.                           | no
+sending_queue      | [sending_queue][]    | Configures batching of data before sending.                                  | no
+retry_on_failure   | [retry_on_failure][] | Configures the retry mechanism for failed requests.                          | no
+debug_metrics      | [debug_metrics][]    | Configures the metrics that this component generates to monitor its state.  | no

The `>` symbol indicates deeper levels of nesting. For example, `client > tls`
refers to a `tls` block defined inside a `client` block.

@@ -69,23 +64,23 @@ The `client` block configures the gRPC client used by the component.

The following arguments are supported:

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`endpoint` | `string` | `host:port` to send telemetry data to. | | yes
-`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no
-`read_buffer_size` | `string` | Size of the read buffer the gRPC client to use for reading server responses. | | no
-`write_buffer_size` | `string` | Size of the write buffer the gRPC client to use for writing requests. | `"512KiB"` | no
-`wait_for_ready` | `boolean` | Waits for gRPC connection to be in the `READY` state before sending data. | `false` | no
-`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no
-`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no
-`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no
-`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no
+Name                | Type                       | Description                                                                        | Default      | Required
+--------------------|----------------------------|------------------------------------------------------------------------------------|--------------|---------
+`endpoint`          | `string`                   | `host:port` to send telemetry data to.                                            |              | yes
+`compression`       | `string`                   | Compression mechanism to use for requests.                                        | `"gzip"`     | no
+`read_buffer_size`  | `string`                   | Size of the read buffer the gRPC client uses for reading server responses.        |              | no
+`write_buffer_size` | `string`                   | Size of the write buffer the gRPC client uses for writing requests.               | `"512KiB"`   | no
+`wait_for_ready`    | `boolean`                  | Waits for gRPC connection to be in the `READY` state before sending data. 
| `false` | no +`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no +`balancer_name` | `string` | Which gRPC client-side load balancer to use for requests. | `pick_first` | no +`authority` | `string` | Overrides the default `:authority` header in gRPC requests from the gRPC client. | | no +`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no -{{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-compression-field.md" source="alloy" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-balancer-name.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-grpc-balancer-name.md" source="alloy" version="" >}} -{{< docs/shared lookup="flow/reference/components/otelcol-grpc-authority.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-grpc-authority.md" source="alloy" version="" >}} An HTTP proxy can be configured through the following environment variables: @@ -114,50 +109,50 @@ able to handle and proxy HTTP/2 traffic. The `tls` block configures TLS settings used for the connection to the gRPC server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} -> **NOTE**: `otelcol.exporter.otlp` uses gRPC, which does not allow you to send sensitive credentials (like `auth`) over insecure channels. -> Sending sensitive credentials over insecure non-TLS connections is supported by non-gRPC exporters such as [otelcol.exporter.otlphttp][]. +{{< admonition type="note" >}} +`otelcol.exporter.otlp` uses gRPC, which does not allow you to send sensitive credentials (like `auth`) over insecure channels. +Sending sensitive credentials over insecure non-TLS connections is supported by non-gRPC exporters such as [otelcol.exporter.otlphttp][]. -[otelcol.exporter.otlphttp]: {{< relref "./otelcol.exporter.otlphttp.md" >}} +[otelcol.exporter.otlphttp]: ../otelcol.exporter.otlphttp/ +{{< /admonition >}} ### keepalive block -The `keepalive` block configures keepalive settings for gRPC client -connections. +The `keepalive` block configures keepalive settings for gRPC client connections. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`ping_wait` | `duration` | How often to ping the server after no activity. | | no -`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no -`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. | | no +Name | Type | Description | Default | Required +------------------------|------------|--------------------------------------------------------------------------------------------|---------|--------- +`ping_wait` | `duration` | How often to ping the server after no activity. | | no +`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping. | | no +`ping_without_stream` | `boolean` | Send pings even if there is no active stream request. 
| | no

### sending_queue block

The `sending_queue` block configures an in-memory buffer of batches before data is sent
to the gRPC server.

-{{< docs/shared lookup="flow/reference/components/otelcol-queue-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/otelcol-queue-block.md" source="alloy" version="" >}}

### retry_on_failure block

-The `retry_on_failure` block configures how failed requests to the gRPC server are
-retried.
+The `retry_on_failure` block configures how failed requests to the gRPC server are retried.

-{{< docs/shared lookup="flow/reference/components/otelcol-retry-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/otelcol-retry-block.md" source="alloy" version="" >}}

### debug_metrics block

-{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
----- | ---- | -----------
+Name    | Type               | Description
+--------|--------------------|-----------------------------------------------------------------
`input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.

`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics,
diff --git a/docs/sources/reference/components/otelcol.exporter.otlphttp.md b/docs/sources/reference/components/otelcol.exporter.otlphttp.md
new file mode 100644
index 0000000000..aa643c15d3
--- /dev/null
+++ b/docs/sources/reference/components/otelcol.exporter.otlphttp.md
@@ -0,0 +1,166 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.otlphttp/
+description: Learn about otelcol.exporter.otlphttp
+title: otelcol.exporter.otlphttp
+---
+
+# otelcol.exporter.otlphttp
+
+`otelcol.exporter.otlphttp` accepts telemetry data from other `otelcol`
+components and writes them over the network using the OTLP HTTP protocol.
+
+{{< admonition type="note" >}}
+`otelcol.exporter.otlphttp` is a wrapper over the upstream OpenTelemetry Collector `otlphttp` exporter.
+Bug reports or feature requests will be redirected to the upstream repository, if necessary.
+{{< /admonition >}}
+
+Multiple `otelcol.exporter.otlphttp` components can be specified by giving them
+different labels.
+
+## Usage
+
+```river
+otelcol.exporter.otlphttp "LABEL" {
+  client {
+    endpoint = "HOST:PORT"
+  }
+}
+```
+
+## Arguments
+
+`otelcol.exporter.otlphttp` supports the following arguments:
+
+Name               | Type     | Description                      | Default                           | Required
+-------------------|----------|----------------------------------|-----------------------------------|---------
+`metrics_endpoint` | `string` | The endpoint to send metrics to. | `client.endpoint + "/v1/metrics"` | no
+`logs_endpoint`    | `string` | The endpoint to send logs to.    | `client.endpoint + "/v1/logs"`    | no
+`traces_endpoint`  | `string` | The endpoint to send traces to.  | `client.endpoint + "/v1/traces"`  | no
+
+The default value depends on the `endpoint` field set in the required `client`
+block. If set, these arguments override the `client.endpoint` field for the
+corresponding signal.
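+
+For example, here is a minimal sketch (the collector URL is hypothetical) that overrides only the
+traces endpoint, while metrics and logs keep the defaults derived from `client.endpoint`:
+
+```river
+otelcol.exporter.otlphttp "example" {
+  // Only traces are redirected to a custom path; metrics and logs still go to
+  // client.endpoint + "/v1/metrics" and client.endpoint + "/v1/logs".
+  traces_endpoint = "https://collector.example.com/custom/v1/traces"
+
+  client {
+    endpoint = "https://collector.example.com"
+  }
+}
+```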
+
+## Blocks
+
+The following blocks are supported inside the definition of
+`otelcol.exporter.otlphttp`:
+
+Hierarchy        | Block                | Description                                                                  | Required
+-----------------|----------------------|------------------------------------------------------------------------------|---------
+client           | [client][]           | Configures the HTTP server to send telemetry data to.                        | yes
+client > tls     | [tls][]              | Configures TLS for the HTTP client.                                          | no
+sending_queue    | [sending_queue][]    | Configures batching of data before sending.                                  | no
+retry_on_failure | [retry_on_failure][] | Configures the retry mechanism for failed requests.                          | no
+debug_metrics    | [debug_metrics][]    | Configures the metrics that this component generates to monitor its state.  | no
+
+The `>` symbol indicates deeper levels of nesting. For example, `client > tls`
+refers to a `tls` block defined inside a `client` block.
+
+[client]: #client-block
+[tls]: #tls-block
+[sending_queue]: #sending_queue-block
+[retry_on_failure]: #retry_on_failure-block
+[debug_metrics]: #debug_metrics-block
+
+### client block
+
+The `client` block configures the HTTP client used by the component.
+
+The following arguments are supported:
+
+Name                      | Type                       | Description                                                                    | Default    | Required
+--------------------------|----------------------------|--------------------------------------------------------------------------------|------------|---------
+`endpoint`                | `string`                   | The target URL to send telemetry data to.                                     |            | yes
+`read_buffer_size`        | `string`                   | Size of the read buffer the HTTP client uses for reading server responses.    | `0`        | no
+`write_buffer_size`       | `string`                   | Size of the write buffer the HTTP client uses for writing requests.           | `"512KiB"` | no
+`timeout`                 | `duration`                 | Time to wait before marking a request as failed.                              | `"30s"`    | no
+`headers`                 | `map(string)`              | Additional headers to send with the request.                                  | `{}`       | no
+`compression`             | `string`                   | Compression mechanism to use for requests.                                    | `"gzip"`   | no
+`max_idle_conns`          | `int`                      | Limits the number of idle HTTP connections the client can keep open.          | `100`      | no
+`max_idle_conns_per_host` | `int`                      | Limits the number of idle HTTP connections the host can keep open.            | `0`        | no
+`max_conns_per_host`      | `int`                      | Limits the total (dialing, active, and idle) number of connections per host.  | `0`        | no
+`idle_conn_timeout`       | `duration`                 | Time to wait before an idle connection closes itself.                         | `"90s"`    | no
+`disable_keep_alives`     | `bool`                     | Disable HTTP keep-alive.                                                      | `false`    | no
+`auth`                    | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests.  |            | no
+
+Setting `disable_keep_alives` to `true` results in significant overhead, because a new HTTP(S) connection must be established for every request.
+Before enabling this option, consider whether changes to idle connection settings can achieve your goal.
+
+{{< docs/shared lookup="reference/components/otelcol-compression-field.md" source="alloy" version="" >}}
+
+### tls block
+
+The `tls` block configures TLS settings used for the connection to the HTTP
+server.
+
+{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}}
+
+### sending_queue block
+
+The `sending_queue` block configures an in-memory buffer of batches before data is sent
+to the HTTP server.
+
+{{< docs/shared lookup="reference/components/otelcol-queue-block.md" source="alloy" version="" >}}
+
+### retry_on_failure block
+
+The `retry_on_failure` block configures how failed requests to the HTTP server are
+retried. 
+
+{{< docs/shared lookup="reference/components/otelcol-retry-block.md" source="alloy" version="" >}}
+
+### debug_metrics block
+
+{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}}
+
+## Exported fields
+
+The following fields are exported and can be referenced by other components:
+
+Name    | Type               | Description
+--------|--------------------|-----------------------------------------------------------------
+`input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.
+
+`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics,
+logs, or traces).
+
+## Component health
+
+`otelcol.exporter.otlphttp` is only reported as unhealthy if given an invalid
+configuration.
+
+## Debug information
+
+`otelcol.exporter.otlphttp` does not expose any component-specific debug
+information.
+
+## Example
+
+This example creates an exporter to send data to a locally running Grafana
+Tempo without TLS:
+
+```river
+otelcol.exporter.otlphttp "tempo" {
+  client {
+    endpoint = "http://tempo:4318"
+    tls {
+      insecure             = true
+      insecure_skip_verify = true
+    }
+  }
+}
+```
+
+
+## Compatible components
+
+`otelcol.exporter.otlphttp` has exports that can be consumed by the following components:
+
+- Components that consume [OpenTelemetry `otelcol.Consumer`](../../compatibility/#opentelemetry-otelcolconsumer-consumers)
+
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
+
+
\ No newline at end of file
diff --git a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md b/docs/sources/reference/components/otelcol.exporter.prometheus.md
similarity index 69%
rename from docs/sources/flow/reference/components/otelcol.exporter.prometheus.md
rename to docs/sources/reference/components/otelcol.exporter.prometheus.md
index 33328e6d2a..fc21c7cf30 100644
--- a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md
+++ b/docs/sources/reference/components/otelcol.exporter.prometheus.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/otelcol.exporter.prometheus/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.exporter.prometheus/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.exporter.prometheus/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.prometheus/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.exporter.prometheus/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.exporter.prometheus/
 description: Learn about otelcol.exporter.prometheus
 title: otelcol.exporter.prometheus
---
@@ -15,16 +10,16 @@ title: otelcol.exporter.prometheus
`otelcol` components, converts metrics to Prometheus-formatted metrics, and
forwards the resulting metrics to `prometheus` components.

-> **NOTE**: `otelcol.exporter.prometheus` is a custom component unrelated to the
-> `prometheus` exporter from OpenTelemetry Collector.
->
-> Conversion of metrics are done according to the OpenTelemetry
-> [Metrics Data Model][] specification.
+{{< admonition type="note" >}}
+`otelcol.exporter.prometheus` is a custom component unrelated to the `prometheus` exporter from OpenTelemetry Collector. 
-
-Multiple `otelcol.exporter.prometheus` components can be specified by giving them
-different labels.
+Conversion of metrics is done according to the OpenTelemetry [Metrics Data Model][] specification.

[Metrics Data Model]: https://opentelemetry.io/docs/reference/specification/metrics/data-model/
+{{< /admonition >}}
+
+Multiple `otelcol.exporter.prometheus` components can be specified by giving them
+different labels.

## Usage

@@ -38,17 +33,17 @@ otelcol.exporter.prometheus "LABEL" {

`otelcol.exporter.prometheus` supports the following arguments:

-Name | Type | Description | Default | Required
----- | ---- |-----------------------------------------------------------| ------- | --------
-`include_target_info` | `boolean` | Whether to include `target_info` metrics. | `true` | no
-`include_scope_info` | `boolean` | Whether to include `otel_scope_info` metrics. | `false` | no
-`include_scope_labels` | `boolean` | Whether to include additional OTLP labels in all metrics. | `true` | no
-`add_metric_suffixes` | `boolean` | Whether to add type and unit suffixes to metrics names. | `true` | no
-`gc_frequency` | `duration` | How often to clean up stale metrics from memory. | `"5m"` | no
-`forward_to` | `list(MetricsReceiver)` | Where to forward converted Prometheus metrics. | | yes
-`resource_to_telemetry_conversion` | `boolean` | Whether to convert OTel resource attributes to Prometheus labels. | `false` | no
-
-By default, OpenTelemetry resources are converted into `target_info` metrics.
+Name                               | Type                    | Description                                                         | Default | Required
+-----------------------------------|-------------------------|---------------------------------------------------------------------|---------|---------
+`include_target_info`              | `boolean`               | Whether to include `target_info` metrics.                          | `true`  | no
+`include_scope_info`               | `boolean`               | Whether to include `otel_scope_info` metrics.                      | `false` | no
+`include_scope_labels`             | `boolean`               | Whether to include additional OTLP labels in all metrics.          | `true`  | no
+`add_metric_suffixes`              | `boolean`               | Whether to add type and unit suffixes to metric names.             | `true`  | no
+`gc_frequency`                     | `duration`              | How often to clean up stale metrics from memory.                   | `"5m"`  | no
+`forward_to`                       | `list(MetricsReceiver)` | Where to forward converted Prometheus metrics.                     |         | yes
+`resource_to_telemetry_conversion` | `boolean`               | Whether to convert OTel resource attributes to Prometheus labels.  | `false` | no
+
+By default, OpenTelemetry resources are converted into `target_info` metrics.
OpenTelemetry instrumentation scopes are converted into `otel_scope_info`
metrics. Set the `include_scope_info` and `include_target_info` arguments to
`false`, respectively, to disable the custom metrics.

@@ -59,23 +54,21 @@ When `include_scope_labels` is `true` the `otel_scope_name` and
When `include_target_info` is true, OpenTelemetry Collector resources are converted into `target_info` metrics.

{{< admonition type="note" >}}
-
-OTLP metrics can have a lot of resource attributes.
+OTLP metrics can have a lot of resource attributes.
Setting `resource_to_telemetry_conversion` to `true` would convert all of them to Prometheus labels, which may not be what you want.
-Instead of using `resource_to_telemetry_conversion`, most users need to use `otelcol.processor.transform`
-to convert OTLP resource attributes to OTLP metric datapoint attributes before using `otelcol.exporter.prometheus`. 
+Instead of using `resource_to_telemetry_conversion`, most users need to use `otelcol.processor.transform` +to convert OTLP resource attributes to OTLP metric datapoint attributes before using `otelcol.exporter.prometheus`. See [Creating Prometheus labels from OTLP resource attributes][] for an example. [Creating Prometheus labels from OTLP resource attributes]: #creating-prometheus-labels-from-otlp-resource-attributes - {{< /admonition >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for metrics. Other telemetry signals are ignored. @@ -89,13 +82,11 @@ The following are dropped during the conversion process: ## Component health -`otelcol.exporter.prometheus` is only reported as unhealthy if given an invalid -configuration. +`otelcol.exporter.prometheus` is only reported as unhealthy if given an invalid configuration. ## Debug information -`otelcol.exporter.prometheus` does not expose any component-specific debug -information. +`otelcol.exporter.prometheus` does not expose any component-specific debug information. ## Example @@ -127,7 +118,7 @@ prometheus.remote_write "mimir" { ## Create Prometheus labels from OTLP resource attributes This example uses `otelcol.processor.transform` to add extra `key1` and `key2` OTLP metric datapoint attributes from the -`key1` and `key2` OTLP resource attributes. +`key1` and `key2` OTLP resource attributes. `otelcol.exporter.prometheus` then converts `key1` and `key2` to Prometheus labels along with any other OTLP metric datapoint attributes. diff --git a/docs/sources/reference/components/otelcol.extension.jaeger_remote_sampling.md b/docs/sources/reference/components/otelcol.extension.jaeger_remote_sampling.md new file mode 100644 index 0000000000..140c2d2c6d --- /dev/null +++ b/docs/sources/reference/components/otelcol.extension.jaeger_remote_sampling.md @@ -0,0 +1,301 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.extension.jaeger_remote_sampling/ +description: Learn about otelcol.extension.jaeger_remote_sampling +label: + stage: experimental +title: otelcol.extension.jaeger_remote_sampling +--- + +# otelcol.extension.jaeger_remote_sampling + +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} + +`otelcol.extension.jaeger_remote_sampling` serves a specified Jaeger remote sampling document. + +{{< admonition type="note" >}} +`otelcol.extension.jaeger_remote_sampling` is a wrapper over the upstream OpenTelemetry Collector `jaegerremotesampling` extension. +Bug reports or feature requests will be redirected to the upstream repository, if necessary. +{{< /admonition >}} + +Multiple `otelcol.extension.jaeger_remote_sampling` components can be specified by giving them different labels. + +## Usage + +```river +otelcol.extension.jaeger_remote_sampling "LABEL" { + source { + } +} +``` + +## Arguments + +`otelcol.extension.jaeger_remote_sampling` doesn't support any arguments and is configured fully through inner blocks. 
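+
+For example, a sketch of a component that serves the sampling document over both protocols while
+retrieving it from an upstream collector (the endpoint is hypothetical; refer to the [source][] and
+[remote][] blocks below for the available settings):
+
+```river
+otelcol.extension.jaeger_remote_sampling "example" {
+  http { }
+  grpc { }
+
+  source {
+    remote {
+      // Hypothetical upstream gRPC endpoint that serves the sampling document.
+      endpoint = "jaeger-collector.example.com:14250"
+    }
+  }
+}
+```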
+
+## Blocks
+
+The following blocks are supported inside the definition of
+`otelcol.extension.jaeger_remote_sampling`:
+
+Hierarchy                             | Block                         | Description                                                                        | Required
+--------------------------------------|-------------------------------|------------------------------------------------------------------------------------|---------
+http                                  | [http][]                      | Configures the HTTP server to serve Jaeger remote sampling.                       | no
+http > tls                            | [tls][]                       | Configures TLS for the HTTP server.                                               | no
+http > cors                           | [cors][]                      | Configures CORS for the HTTP server.                                              | no
+grpc                                  | [grpc][]                      | Configures the gRPC server to serve Jaeger remote sampling.                       | no
+grpc > tls                            | [tls][]                       | Configures TLS for the gRPC server.                                               | no
+grpc > keepalive                      | [keepalive][]                 | Configures keepalive settings for the configured server.                          | no
+grpc > keepalive > server_parameters  | [server_parameters][]         | Server parameters used to configure keepalive settings.                           | no
+grpc > keepalive > enforcement_policy | [enforcement_policy][]        | Enforcement policy for keepalive settings.                                        | no
+source                                | [source][]                    | Configures the Jaeger remote sampling document.                                   | yes
+source > remote                       | [remote][]                    | Configures the gRPC client used to retrieve the Jaeger remote sampling document.  | no
+source > remote > tls                 | [tls][tls_client]             | Configures TLS for the gRPC client.                                               | no
+source > remote > keepalive          | [keepalive][keepalive_client] | Configures keepalive settings for the gRPC client.                                | no
+
+The `>` symbol indicates deeper levels of nesting. For example, `grpc > tls`
+refers to a `tls` block defined inside a `grpc` block.
+
+[http]: #http-block
+[tls]: #tls-block
+[cors]: #cors-block
+[grpc]: #grpc-block
+[keepalive]: #keepalive-block
+[server_parameters]: #server_parameters-block
+[enforcement_policy]: #enforcement_policy-block
+[source]: #source-block
+[remote]: #remote-block
+[tls_client]: #tls-client-block
+[keepalive_client]: #keepalive-client-block
+
+### http block
+
+The `http` block configures an HTTP server which serves the Jaeger remote sampling document.
+
+The following arguments are supported:
+
+Name                    | Type      | Description                                                             | Default          | Required
+------------------------|-----------|-------------------------------------------------------------------------|------------------|---------
+`endpoint`              | `string`  | `host:port` to listen for traffic on.                                  | `"0.0.0.0:5778"` | no
+`max_request_body_size` | `string`  | Maximum request body size the server will allow. No limit when unset.  |                  | no
+`include_metadata`      | `boolean` | Propagate incoming connection metadata to downstream consumers.        |                  | no
+
+### tls block
+
+The `tls` block configures TLS settings used for a server. If the `tls` block
+isn't provided, TLS won't be used for connections to the server.
+
+The following arguments are supported:
+
+Name              | Type       | Description                                                     | Default     | Required
+------------------|------------|-----------------------------------------------------------------|-------------|---------
+`ca_file`         | `string`   | Path to the CA file.                                           |             | no
+`cert_file`       | `string`   | Path to the TLS certificate.                                   |             | no
+`key_file`        | `string`   | Path to the TLS certificate key.                               |             | no
+`min_version`     | `string`   | Minimum acceptable TLS version for connections.                | `"TLS 1.2"` | no
+`max_version`     | `string`   | Maximum acceptable TLS version for connections.                | `"TLS 1.3"` | no
+`reload_interval` | `duration` | Frequency to reload the certificates.                          |             | no
+`client_ca_file`  | `string`   | Path to the CA file used to authenticate client certificates.  |             | no
+
+### cors block
+
+The `cors` block configures CORS settings for an HTTP server. 
+
+The following arguments are supported:
+
+Name              | Type           | Description                                                | Default                | Required
+------------------|----------------|------------------------------------------------------------|------------------------|---------
+`allowed_origins` | `list(string)` | Allowed values for the `Origin` header.                   |                        | no
+`allowed_headers` | `list(string)` | Accepted headers from CORS requests.                      | `["X-Requested-With"]` | no
+`max_age`         | `number`       | Configures the `Access-Control-Max-Age` response header.  |                        | no
+
+The `allowed_headers` argument specifies which headers are acceptable from a CORS
+request. The following headers are always implicitly allowed:
+
+* `Accept`
+* `Accept-Language`
+* `Content-Type`
+* `Content-Language`
+
+If `allowed_headers` includes `"*"`, all headers will be permitted.
+
+### grpc block
+
+The `grpc` block configures a gRPC server which serves the Jaeger remote sampling document.
+
+The following arguments are supported:
+
+Name                     | Type      | Description                                                                  | Default           | Required
+-------------------------|-----------|------------------------------------------------------------------------------|-------------------|---------
+`endpoint`               | `string`  | `host:port` to listen for traffic on.                                       | `"0.0.0.0:14250"` | no
+`transport`              | `string`  | Transport to use for the gRPC server.                                       | `"tcp"`           | no
+`max_recv_msg_size`      | `string`  | Maximum size of messages the server will accept. 0 disables the limit.      |                   | no
+`max_concurrent_streams` | `number`  | Limit the number of concurrent streaming RPC calls.                         |                   | no
+`read_buffer_size`       | `string`  | Size of the read buffer the gRPC server will use for reading from clients.  | `"512KiB"`        | no
+`write_buffer_size`      | `string`  | Size of the write buffer the gRPC server will use for writing to clients.   |                   | no
+`include_metadata`       | `boolean` | Propagate incoming connection metadata to downstream consumers.             |                   | no
+
+### keepalive block
+
+The `keepalive` block configures keepalive settings for connections to a gRPC
+server.
+
+`keepalive` doesn't support any arguments and is configured fully through inner
+blocks.
+
+### server_parameters block
+
+The `server_parameters` block controls keepalive and maximum age settings for gRPC
+servers.
+
+The following arguments are supported:
+
+Name                       | Type       | Description                                                                            | Default      | Required
+---------------------------|------------|-----------------------------------------------------------------------------------------|--------------|---------
+`max_connection_idle`      | `duration` | Maximum age for idle connections.                                                      | `"infinity"` | no
+`max_connection_age`       | `duration` | Maximum age for non-idle connections.                                                  | `"infinity"` | no
+`max_connection_age_grace` | `duration` | Time to wait before forcibly closing connections.                                      | `"infinity"` | no
+`time`                     | `duration` | How often to ping inactive clients to check for liveness.                              | `"2h"`       | no
+`timeout`                  | `duration` | Time to wait before closing inactive clients that do not respond to liveness checks.   | `"20s"`      | no
+
+### enforcement_policy block
+
+The `enforcement_policy` block configures the keepalive enforcement policy for
+gRPC servers. The server will close connections from clients that violate the
+configured policy.
+
+The following arguments are supported:
+
+Name                    | Type       | Description                                                               | Default | Required
+------------------------|------------|-----------------------------------------------------------------------------|---------|---------
+`min_time`              | `duration` | Minimum time clients should wait before sending a keepalive ping.        | `"5m"`  | no
+`permit_without_stream` | `boolean`  | Allow clients to send keepalive pings when there are no active streams.  | `false` | no
+
+### source block
+
+The `source` block configures the method of retrieving the Jaeger remote sampling document
+that is served by the servers specified in the `grpc` and `http` blocks.
+
+The following arguments are supported:
+
+Name              | Type       | Description                                                                       | Default | Required
+------------------|------------|-------------------------------------------------------------------------------------|---------|---------
+`file`            | `string`   | A local file containing a Jaeger remote sampling document.                        | `""`    | no
+`reload_interval` | `duration` | The interval at which to reload the specified file. Leave at 0 to never reload.   | `0`     | no
+`content`         | `string`   | A string containing the Jaeger remote sampling contents directly.                 | `""`    | no
+
+Exactly one of the `file` argument, the `content` argument, or the `remote` block must be specified.
+
+### remote block
+
+The `remote` block configures the gRPC client used by the component.
+
+The following arguments are supported:
+
+Name                | Type                       | Description                                                                        | Default      | Required
+--------------------|----------------------------|------------------------------------------------------------------------------------|--------------|---------
+`endpoint`          | `string`                   | `host:port` to retrieve the Jaeger remote sampling document from.                 |              | yes
+`compression`       | `string`                   | Compression mechanism to use for requests.                                        | `"gzip"`     | no
+`read_buffer_size`  | `string`                   | Size of the read buffer the gRPC client uses for reading server responses.        |              | no
+`write_buffer_size` | `string`                   | Size of the write buffer the gRPC client uses for writing requests.               | `"512KiB"`   | no
+`wait_for_ready`    | `boolean`                  | Waits for gRPC connection to be in the `READY` state before sending data.         | `false`      | no
+`headers`           | `map(string)`              | Additional headers to send with the request.                                      | `{}`         | no
+`balancer_name`     | `string`                   | Which gRPC client-side load balancer to use for requests.                         | `pick_first` | no
+`authority`         | `string`                   | Overrides the default `:authority` header in gRPC requests from the gRPC client.  |              | no
+`auth`              | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests.      |              | no
+
+{{< docs/shared lookup="reference/components/otelcol-compression-field.md" source="alloy" version="" >}}
+
+{{< docs/shared lookup="reference/components/otelcol-grpc-balancer-name.md" source="alloy" version="" >}}
+
+{{< docs/shared lookup="reference/components/otelcol-grpc-authority.md" source="alloy" version="" >}}
+
+An HTTP proxy can be configured through the following environment variables:
+
+* `HTTPS_PROXY`
+* `NO_PROXY`
+
+The `HTTPS_PROXY` environment variable specifies a URL to use for proxying
+requests. Connections to the proxy are established via [the `HTTP CONNECT`
+method][HTTP CONNECT].
+
+The `NO_PROXY` environment variable is an optional list of comma-separated
+hostnames for which the HTTPS proxy should _not_ be used. Each hostname can be
+provided as an IP address (`1.2.3.4`), an IP address in CIDR notation
+(`1.2.3.4/8`), a domain name (`example.com`), or `*`. A domain name matches
+that domain and all subdomains. A domain name with a leading "."
+(`.example.com`) matches subdomains only. `NO_PROXY` is only read when
+`HTTPS_PROXY` is set.
+
+Because `otelcol.extension.jaeger_remote_sampling` uses gRPC, the configured proxy server must be
+able to handle and proxy HTTP/2 traffic.
+
+[HTTP CONNECT]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/CONNECT
+
+### tls client block
+
+The `tls` block configures TLS settings used for the connection to the gRPC
+server.
+
+{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}}
+
+### keepalive client block
+
+The `keepalive` block configures keepalive settings for gRPC client
+connections.
+
+The following arguments are supported:
+
+Name                    | Type       | Description                                                                                  | Default | Required
+------------------------|------------|-----------------------------------------------------------------------------------------------|---------|---------
+`ping_wait`             | `duration` | How often to ping the server after no activity.                                             |         | no
+`ping_response_timeout` | `duration` | Time to wait before closing inactive connections if the server does not respond to a ping.  |         | no
+`ping_without_stream`   | `boolean`  | Send pings even if there is no active stream request.                                       |         | no
+
+## Component health
+
+`otelcol.extension.jaeger_remote_sampling` is only reported as unhealthy if given an invalid
+configuration.
+
+## Debug information
+
+`otelcol.extension.jaeger_remote_sampling` does not expose any component-specific debug information.
+
+## Examples
+
+### Serving from a file
+
+This example configures the Jaeger remote sampling extension to load a local JSON document and
+serve it over the default HTTP port of 5778. Currently, this configuration style exists for consistency
+with upstream OpenTelemetry Collector components and may be removed.
+
+```river
+otelcol.extension.jaeger_remote_sampling "example" {
+  http {
+  }
+  source {
+    file = "/path/to/jaeger-sampling.json"
+    reload_interval = "10s"
+  }
+}
+```
+
+### Serving from another component
+
+This example uses the output of a component to determine what sampling
+rules to serve:
+
+```river
+local.file "sampling" {
+  filename = "/path/to/jaeger-sampling.json"
+}
+
+otelcol.extension.jaeger_remote_sampling "example" {
+  http {
+  }
+  source {
+    content = local.file.sampling.content
+  }
+}
+```
diff --git a/docs/sources/flow/reference/components/otelcol.processor.attributes.md b/docs/sources/reference/components/otelcol.processor.attributes.md
similarity index 80%
rename from docs/sources/flow/reference/components/otelcol.processor.attributes.md
rename to docs/sources/reference/components/otelcol.processor.attributes.md
index 6c07d1c713..c76c385b79 100644
--- a/docs/sources/flow/reference/components/otelcol.processor.attributes.md
+++ b/docs/sources/reference/components/otelcol.processor.attributes.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.attributes/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.attributes/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.attributes/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.attributes/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.attributes/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.attributes/
 description: Learn about otelcol.processor.attributes
 title: otelcol.processor.attributes
---
@@ -13,15 +8,14 @@ title: otelcol.processor.attributes

`otelcol.processor.attributes` accepts telemetry data from other `otelcol`
components and modifies attributes of a span, log, or metric. 
-It also supports the ability to filter and match input data to determine if
-it should be included or excluded for attribute modifications.
+It also supports the ability to filter and match input data to determine if it should be included or excluded for attribute modifications.

-> **NOTE**: `otelcol.processor.attributes` is a wrapper over the upstream
-> OpenTelemetry Collector `attributes` processor. Bug reports or feature requests
-> will be redirected to the upstream repository, if necessary.
+{{< admonition type="note" >}}
+`otelcol.processor.attributes` is a wrapper over the upstream OpenTelemetry Collector `attributes` processor.
+Bug reports or feature requests will be redirected to the upstream repository, if necessary.
+{{< /admonition >}}

-You can specify multiple `otelcol.processor.attributes` components by giving them
-different labels.
+You can specify multiple `otelcol.processor.attributes` components by giving them different labels.

## Usage

@@ -37,29 +31,29 @@ otelcol.processor.attributes "LABEL" {

## Arguments

-`otelcol.processor.attributes` doesn't support any arguments and is configured fully
-through inner blocks.
+`otelcol.processor.attributes` doesn't support any arguments and is configured fully through inner blocks.

## Blocks

The following blocks are supported inside the definition of
`otelcol.processor.attributes`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-output | [output][] | Configures where to send received telemetry data. | yes
-action | [action][] | Actions to take on the attributes of incoming metrics/logs/traces. | no
-include | [include][] | Filter for data included in this processor's actions. | no
-include > regexp | [regexp][] | Regex cache settings. | no
-include > attribute | [attribute][] | A list of attributes to match against. | no
-include > resource | [resource][] | A list of items to match the resources against. | no
-include > library | [library][] | A list of items to match the implementation library against. | no
-include > log_severity | [library][] | How to match against a log record's SeverityNumber, if defined. | no
-exclude | [exclude][] | Filter for data excluded from this processor's actions | no
-exclude > regexp | [regexp][] | Regex cache settings. | no
-exclude > attribute | [attribute][] | A list of attributes to match against. | no
-exclude > resource | [resource][] | A list of items to match the resources against. | no
-exclude > library | [library][] | A list of items to match the implementation library against. | no
-exclude > log_severity | [log_severity][] | How to match against a log record's SeverityNumber, if defined. | no
+
+Hierarchy              | Block            | Description                                                          | Required
+-----------------------|------------------|----------------------------------------------------------------------|---------
+output                 | [output][]       | Configures where to send received telemetry data.                   | yes
+action                 | [action][]       | Actions to take on the attributes of incoming metrics/logs/traces.  | no
+include                | [include][]      | Filter for data included in this processor's actions.               | no
+include > regexp       | [regexp][]       | Regex cache settings.                                               | no
+include > attribute    | [attribute][]    | A list of attributes to match against.                              | no
+include > resource     | [resource][]     | A list of items to match the resources against.                     | no
+include > library      | [library][]      | A list of items to match the implementation library against.        | no
+include > log_severity | [log_severity][] | How to match against a log record's SeverityNumber, if defined.     | no
+exclude                | [exclude][]      | Filter for data excluded from this processor's actions.             | no
+exclude > regexp       | [regexp][]       | Regex cache settings.                                               | no
+exclude > attribute    | [attribute][]    | A list of attributes to match against.                              | no
+exclude > resource     | [resource][]     | A list of items to match the resources against.                     | no
+exclude > library      | [library][]      | A list of items to match the implementation library against.        | no
+exclude > log_severity | [log_severity][] | How to match against a log record's SeverityNumber, if defined.     | no

The `>` symbol indicates deeper levels of nesting. For example, `include > attribute`
refers to an `attribute` block defined inside an `include` block.

@@ -82,15 +76,15 @@ The `action` block configures how to modify the span, log, or metric.

The following attributes are supported:

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`key` | `string` | The attribute that the action relates to. | | yes
-`action` | `string` | The type of action performed. | | yes
-`value` | `any` | The value to populate for the key. | | no
-`pattern` | `string` | A regex pattern. | `""` | no
-`from_attribute` | `string` | The attribute from the input data used to populate the attribute value. | `""` | no
-`from_context` | `string` | The context value used to populate the attribute value. | `""` | no
-`converted_type` | `string` | The type to convert the attribute value to. | `""` | no
+Name             | Type     | Description                                                               | Default | Required
+-----------------|----------|-----------------------------------------------------------------------------|---------|---------
+`key`            | `string` | The attribute that the action relates to.                                |         | yes
+`action`         | `string` | The type of action performed.                                            |         | yes
+`value`          | `any`    | The value to populate for the key.                                       |         | no
+`pattern`        | `string` | A regex pattern.                                                         | `""`    | no
+`from_attribute` | `string` | The attribute from the input data used to populate the attribute value.  | `""`    | no
+`from_context`   | `string` | The context value used to populate the attribute value.                  | `""`    | no
+`converted_type` | `string` | The type to convert the attribute value to.                              | `""`    | no

The `value` data type must be either a number, string, or boolean.

@@ -106,7 +100,7 @@ The supported values for `action` are:

  * The `key` attribute is required. It specifies the attribute to act upon.
  * One of the `value`, `from_attribute`, or `from_context` attributes is required.

-* `upsert`: Either inserts a new attribute in input data where the key does not already exist
+* `upsert`: Either inserts a new attribute in input data where the key does not already exist
  or updates an attribute in input data where the key does exist.

  * The `key` attribute is required. It specifies the attribute to act upon.
  * One of the `value`, `from_attribute`, or `from_context` attributes is
    required.
  * `from_attribute` specifies the attribute in the input data used to populate
    the value. If the attribute doesn't exist, no action is performed.
  * `from_context` specifies the context value used to populate the attribute
    value. If the key is prefixed with `metadata.`, the values are searched
-   in the receiver's transport protocol for additional information like gRPC Metadata or HTTP Headers.
+   in the receiver's transport protocol for additional information like gRPC Metadata or HTTP Headers.
    If the key is prefixed with `auth.`, the values are searched
-   in the authentication information set by the server authenticator.
-   Refer to the server authenticator's documentation part of your pipeline
+   in the authentication information set by the server authenticator.
+   Refer to the documentation of the server authenticator used in your pipeline
    for more information about which attributes are available.
    If the key doesn't exist, no action is performed.
    If the key has multiple values, the values will be joined with a `;` separator.

@@ -128,12 +122,12 @@ The supported values for `action` are:

  * The `key` attribute, the `pattern` attribute, or both are required.

-* `extract`: Extracts values using a regular expression rule from the input key to target keys specified in the rule. 
-  If a target key already exists, it will be overridden. Note: It behaves similarly to the Span Processor `to_attributes` 
+* `extract`: Extracts values using a regular expression rule from the input key to target keys specified in the rule.
+  If a target key already exists, it will be overridden. Note: It behaves similarly to the Span Processor `to_attributes`
  setting with the existing attribute as the source.

  * The `key` attribute is required. It specifies the attribute to extract values from. The value of `key` is NOT altered.
-  * The `pattern` attribute is required. It is the regex pattern used to extract attributes from the value of `key`. 
+  * The `pattern` attribute is required. It is the regex pattern used to extract attributes from the value of `key`.
    The submatchers must be named. If attributes already exist, they will be overwritten.

* `convert`: Converts an existing attribute to a specified type.
@@ -149,14 +143,14 @@ The supported values for `action` are:
The `include` block provides an option to include data being fed into the [action]
blocks based on the properties of span, log, or metric records.

-{{< docs/shared lookup="flow/reference/components/match-properties-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/match-properties-block.md" source="alloy" version="" >}}

One of the following is also required:

-* For spans, one of `services`, `span_names`, `span_kinds`, [attribute][], [resource][], or [library][] must be specified 
+* For spans, one of `services`, `span_names`, `span_kinds`, [attribute][], [resource][], or [library][] must be specified
with a non-empty value for a valid configuration. The `log_bodies`, `log_severity_texts`, `log_severity`, and `metric_names` attributes are invalid.
-* For logs, one of `log_bodies`, `log_severity_texts`, `log_severity`, [attribute][], [resource][], or [library][] must be 
+* For logs, one of `log_bodies`, `log_severity_texts`, `log_severity`, [attribute][], [resource][], or [library][] must be
specified with a non-empty value for a valid configuration. The `span_names`, `span_kinds`, `metric_names`, and `services` attributes are invalid.
-* For metrics, `metric_names` must be specified with a valid non-empty value for a valid configuration. The `span_names`, 
+* For metrics, `metric_names` must be specified with a valid non-empty value for a valid configuration. The `span_names`,
`span_kinds`, `log_bodies`, `log_severity_texts`, `log_severity`, `services`, [attribute][], [resource][], and [library][] attributes are invalid.

If the configuration includes filters which are specific to a particular signal type, it is best to include only that signal type in the component's output.
@@ -169,10 +163,12 @@ The `exclude` block provides an option to exclude data from being fed into the [

{{< admonition type="note" >}}
Signals excluded by the `exclude` block will still be propagated to downstream components as-is. 
If you don't want to propagate certain signals to downstream components,
-consider a processor such as [otelcol.processor.tail_sampling]({{< relref "./otelcol.processor.tail_sampling.md" >}}).
+consider a processor such as [otelcol.processor.tail_sampling][].
+
+[otelcol.processor.tail_sampling]: ../otelcol.processor.tail_sampling/
 {{< /admonition >}}

-{{< docs/shared lookup="flow/reference/components/match-properties-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/match-properties-block.md" source="alloy" version="" >}}

 One of the following is also required:

 * For spans, one of `services`, `span_names`, `span_kinds`, [attribute][], [resource][], or [library][] must be specified
@@ -187,34 +183,34 @@ For example, adding a `span_names` filter could cause the component to error if

 ### regexp block

-{{< docs/shared lookup="flow/reference/components/otelcol-filter-regexp-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/otelcol-filter-regexp-block.md" source="alloy" version="" >}}

 ### attribute block

-{{< docs/shared lookup="flow/reference/components/otelcol-filter-attribute-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/otelcol-filter-attribute-block.md" source="alloy" version="" >}}

 ### resource block

-{{< docs/shared lookup="flow/reference/components/otelcol-filter-resource-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/otelcol-filter-resource-block.md" source="alloy" version="" >}}

 ### library block

-{{< docs/shared lookup="flow/reference/components/otelcol-filter-library-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/otelcol-filter-library-block.md" source="alloy" version="" >}}

 ### log_severity block

-{{< docs/shared lookup="flow/reference/components/otelcol-filter-log-severity-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/otelcol-filter-log-severity-block.md" source="alloy" version="" >}}

 ### output block

-{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}}

 ## Exported fields

 The following fields are exported and can be referenced by other components:

-Name | Type | Description
----- | ---- | -----------
+Name | Type | Description
+--------|--------------------|-----------------------------------------------------------------
 `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.

 `input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, logs, or traces).
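+
+For orientation, the following is a minimal sketch of this component in use. The `default` label, the `env` attribute, and the `otelcol.exporter.otlp.default` wiring are illustrative assumptions, not values prescribed by this page:
+
+```river
+otelcol.processor.attributes "default" {
+  // Insert the `env` attribute, or update it if it already exists.
+  action {
+    key    = "env"
+    value  = "production"
+    action = "upsert"
+  }
+
+  output {
+    metrics = [otelcol.exporter.otlp.default.input]
+    logs    = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```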
diff --git a/docs/sources/flow/reference/components/otelcol.processor.batch.md b/docs/sources/reference/components/otelcol.processor.batch.md
similarity index 78%
rename from docs/sources/flow/reference/components/otelcol.processor.batch.md
rename to docs/sources/reference/components/otelcol.processor.batch.md
index 7b461c1168..13821b0253 100644
--- a/docs/sources/flow/reference/components/otelcol.processor.batch.md
+++ b/docs/sources/reference/components/otelcol.processor.batch.md
@@ -1,10 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.batch/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.batch/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.batch/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.batch/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.batch/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.batch/
 description: Learn about otelcol.processor.batch
 title: otelcol.processor.batch
 ---
@@ -16,11 +11,9 @@ components and places them into batches. Batching improves the compression of
 data and reduces the number of outgoing network requests required to transmit
 data. This processor supports both size- and time-based batching.

-We strongly recommend that you configure the batch processor on every Agent that
-uses OpenTelemetry (otelcol) Flow components. The batch processor should be
-defined in the pipeline after the `otelcol.processor.memory_limiter` as well
-as any sampling processors. This is because batching should happen after any
-data drops such as sampling.
+We strongly recommend that you configure the batch processor on every {{< param "PRODUCT_NAME" >}} instance that uses OpenTelemetry (otelcol) components.
+The batch processor should be defined in the pipeline after the `otelcol.processor.memory_limiter` as well as any sampling processors.
+This is because batching should happen after any data drops such as sampling.

 > **NOTE**: `otelcol.processor.batch` is a wrapper over the upstream
 > OpenTelemetry Collector `batch` processor. Bug reports or feature requests

@@ -45,13 +38,13 @@ otelcol.processor.batch "LABEL" {

 `otelcol.processor.batch` supports the following arguments:

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`timeout` | `duration` | How long to wait before flushing the batch. | `"200ms"` | no
-`send_batch_size` | `number` | Amount of data to buffer before flushing the batch. | `8192` | no
-`send_batch_max_size` | `number` | Upper limit of a batch size. | `0` | no
-`metadata_keys` | `list(string)` | Creates a different batcher for each key/value combination of metadata. | `[]` | no
-`metadata_cardinality_limit` | `number` | Limit of the unique metadata key/value combinations. | `1000` | no
+Name | Type | Description | Default | Required
+-----------------------------|----------------|-------------------------------------------------------------------------|-----------|---------
+`timeout` | `duration` | How long to wait before flushing the batch. | `"200ms"` | no
+`send_batch_size` | `number` | Amount of data to buffer before flushing the batch. | `8192` | no
+`send_batch_max_size` | `number` | Upper limit of a batch size. 
| `0` | no +`metadata_keys` | `list(string)` | Creates a different batcher for each key/value combination of metadata. | `[]` | no +`metadata_cardinality_limit` | `number` | Limit of the unique metadata key/value combinations. | `1000` | no `otelcol.processor.batch` accumulates data into a batch until one of the following events happens: @@ -102,22 +95,22 @@ which defaults to 1000 to limit memory impact. The following blocks are supported inside the definition of `otelcol.processor.batch`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -output | [output][] | Configures where to send received telemetry data. | yes +Hierarchy | Block | Description | Required +----------|------------|---------------------------------------------------|--------- +output | [output][] | Configures where to send received telemetry data. | yes [output]: #output-block ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, @@ -226,7 +219,8 @@ otelcol.exporter.otlp "production" { } ``` -[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} +[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/ + ## Compatible components diff --git a/docs/sources/flow/reference/components/otelcol.processor.discovery.md b/docs/sources/reference/components/otelcol.processor.discovery.md similarity index 80% rename from docs/sources/flow/reference/components/otelcol.processor.discovery.md rename to docs/sources/reference/components/otelcol.processor.discovery.md index a294c8440d..4ce4b76e70 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.discovery.md +++ b/docs/sources/reference/components/otelcol.processor.discovery.md @@ -1,10 +1,6 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.discovery/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.discovery/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.discovery/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.discovery/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.discovery/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.discovery/ description: Learn about otelcol.processor.discovery title: otelcol.processor.discovery --- @@ -18,12 +14,10 @@ of labels for each discovered target. matching the one in the `__address__` label provided by the `discovery.*` component. {{< admonition type="note" >}} -`otelcol.processor.discovery` is a custom component unrelated to any -processors from the OpenTelemetry Collector. +`otelcol.processor.discovery` is a custom component unrelated to any processors from the OpenTelemetry Collector. {{< /admonition >}} -Multiple `otelcol.processor.discovery` components can be specified by giving them -different labels. 
+Multiple `otelcol.processor.discovery` components can be specified by giving them different labels.

{{< admonition type="note" >}}
It can be difficult to follow [OpenTelemetry semantic conventions][OTEL sem conv] when
@@ -46,7 +40,7 @@ from Static mode's `prom_sd_operation_type`/`prom_sd_pod_associations` [configur

[Prometheus data model]: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
[OTEL sem conv]: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/README.md
-[Traces]: {{< relref "../../../static/configuration/traces-config.md" >}}
+[Traces]: https://grafana.com/docs/agent/latest/static/configuration/traces-config/
{{< /admonition >}}

## Usage

@@ -64,11 +58,11 @@ otelcol.processor.discovery "LABEL" {

`otelcol.processor.discovery` supports the following arguments:

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`targets` | `list(map(string))` | List of target labels to apply to the spans. | | yes
-`operation_type` | `string` | Configures whether to update a span's attribute if it already exists. | `upsert` | no
-`pod_associations` | `list(string)` | Configures how to decide the hostname of the span. | `["ip", "net.host.ip", "k8s.pod.ip", "hostname", "connection"]` | no
+Name | Type | Description | Default | Required
+-----------------|---------------------|-----------------------------------------------------------------------|----------|---------
+`targets` | `list(map(string))` | List of target labels to apply to the spans. | | yes
+`operation_type` | `string` | Configures whether to update a span's attribute if it already exists. | `upsert` | no
+`pod_associations` | `list(string)` | Configures how to decide the hostname of the span. | `["ip", "net.host.ip", "k8s.pod.ip", "hostname", "connection"]` | no

`targets` can come from `discovery.*` components:
1. The `__address__` label will be matched against the IP address of incoming spans.
@@ -98,22 +92,22 @@ only if `"ip"` has not already matched.

The following blocks are supported inside the definition of
`otelcol.processor.discovery`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-output | [output][] | Configures where to send received telemetry data. | yes
+Hierarchy | Block | Description | Required
+----------|------------|---------------------------------------------------|---------
+output | [output][] | Configures where to send received telemetry data. | yes

[output]: #output-block

### output block

-{{< docs/shared lookup="flow/reference/components/output-block-traces.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/output-block-traces.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
---- | ---- | -----------
+Name | Type | Description
+--------|--------------------|-----------------------------------------------------------------
`input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.

`input` accepts `otelcol.Consumer` OTLP-formatted data for telemetry signals of these types:
@@ -173,7 +167,7 @@ otelcol.processor.discovery "default" {

### Using a preconfigured list of attributes

-It is not necessary to use a discovery component. In the example below, both a `test_label` and
In the example below, both a `test_label` and a `test.label.with.dots` resource attributes will be added to a span if its IP address is "1.2.2.2". The `__internal_label__` will be not be added to the span, because it begins with a double underscore (`__`). @@ -181,7 +175,7 @@ a double underscore (`__`). ```river otelcol.processor.discovery "default" { targets = [{ - "__address__" = "1.2.2.2", + "__address__" = "1.2.2.2", "__internal_label__" = "test_val", "test_label" = "test_val2", "test.label.with.dots" = "test.val2.with.dots"}] diff --git a/docs/sources/flow/reference/components/otelcol.processor.filter.md b/docs/sources/reference/components/otelcol.processor.filter.md similarity index 89% rename from docs/sources/flow/reference/components/otelcol.processor.filter.md rename to docs/sources/reference/components/otelcol.processor.filter.md index c82be95aa0..75746831b1 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.filter.md +++ b/docs/sources/reference/components/otelcol.processor.filter.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.filter/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.filter/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.filter/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.filter/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.filter/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.filter/ description: Learn about otelcol.processor.filter labels: stage: experimental @@ -13,7 +8,7 @@ title: otelcol.processor.filter # otelcol.processor.filter -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `otelcol.processor.filter` accepts and filters telemetry data from other `otelcol` components using the [OpenTelemetry Transformation Language (OTTL)][OTTL]. @@ -25,8 +20,7 @@ A path is a reference to a telemetry data such as: * Instrumentation scope name. * Span attributes. -In addition to the [standard OTTL Converter functions][OTTL Converter functions], -the following metrics-only functions are used exclusively by the processor: +In addition to the [standard OTTL Converter functions][OTTL Converter functions], the following metrics-only functions are used exclusively by the processor: * [HasAttrKeyOnDataPoint][] * [HasAttrOnDataPoint][] @@ -41,15 +35,12 @@ the following metrics-only functions are used exclusively by the processor: {{< admonition type="note" >}} Raw River strings can be used to write OTTL statements. -For example, the OTTL statement `attributes["grpc"] == true` -is written in River as \`attributes["grpc"] == true\` - +For example, the OTTL statement `attributes["grpc"] == true` is written in River as \`attributes["grpc"] == true\` {{< /admonition >}} {{< admonition type="note" >}} -`otelcol.processor.filter` is a wrapper over the upstream -OpenTelemetry Collector `filter` processor. If necessary, bug reports or feature requests -will be redirected to the upstream repository. +`otelcol.processor.filter` is a wrapper over the upstream OpenTelemetry Collector `filter` processor. +If necessary, bug reports or feature requests will be redirected to the upstream repository. 
{{< /admonition >}}

 You can specify multiple `otelcol.processor.filter` components by giving them different labels.

@@ -133,7 +124,7 @@ If all span events for a span are dropped, the span will be left intact.

 ### metrics block

-The `metrics` block specifies statements that filter metric telemetry signals. 
+The `metrics` block specifies statements that filter metric telemetry signals.
 Only one `metrics` block can be specified.

 Name | Type | Description | Default | Required
@@ -141,8 +132,7 @@ Name | Type | Description
 `metric` | `list(string)` | List of OTTL statements filtering OTLP metrics. | | no
 `datapoint` | `list(string)` | List of OTTL statements filtering OTLP metric datapoints. | | no

-The syntax of OTTL statements depends on the OTTL context. See the OpenTelemetry
-documentation for more information:
+The syntax of OTTL statements depends on the OTTL context. See the OpenTelemetry documentation for more information:

 * [OTTL metric context][]
 * [OTTL datapoint context][]

@@ -157,30 +147,28 @@ If all datapoints for a metric are dropped, the metric will also be dropped.

 ### logs block

-The `logs` block specifies statements that filter log telemetry signals. 
+The `logs` block specifies statements that filter log telemetry signals.
 Only one `logs` block can be specified.

 Name | Type | Description | Default | Required
 --------------- | -------------- | ---------------------------------------------- | ------- | --------
 `log_record` | `list(string)` | List of OTTL statements filtering OTLP log records. | | no

-The syntax of OTTL statements depends on the OTTL context. See the OpenTelemetry
-documentation for more information:
+The syntax of OTTL statements depends on the OTTL context. See the OpenTelemetry documentation for more information:

 * [OTTL log context][]

 Only one of the statements in the list has to be satisfied.

-
 ### output block

-{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}}

 ## Exported fields

 The following fields are exported and can be referenced by other components:

 Name | Type | Description
-------- | ------------------ | -----------
+--------|--------------------|-----------------------------------------------------------------
 `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. 
`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, @@ -290,7 +278,7 @@ Some values in the River strings are [escaped][river-strings]: * `\` is escaped with `\\` * `"` is escaped with `\"` -[river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}} +[river-strings]: ../../../concepts/config-language/expressions/types_and_values/#strings [OTTL]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.85.0/pkg/ottl/README.md diff --git a/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md b/docs/sources/reference/components/otelcol.processor.k8sattributes.md similarity index 72% rename from docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md rename to docs/sources/reference/components/otelcol.processor.k8sattributes.md index fb2f1c785a..1622902877 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md +++ b/docs/sources/reference/components/otelcol.processor.k8sattributes.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.k8sattributes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.k8sattributes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.k8sattributes/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.k8sattributes/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.k8sattributes/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.k8sattributes/ description: Learn about otelcol.processor.k8sattributes title: otelcol.processor.k8sattributes --- @@ -15,13 +10,11 @@ title: otelcol.processor.k8sattributes components and adds Kubernetes metadata to the resource attributes of spans, logs, or metrics. {{< admonition type="note" >}} -`otelcol.processor.k8sattributes` is a wrapper over the upstream OpenTelemetry -Collector `k8sattributes` processor. If necessary, bug reports or feature requests -will be redirected to the upstream repository. +`otelcol.processor.k8sattributes` is a wrapper over the upstream OpenTelemetry Collector `k8sattributes` processor. +If necessary, bug reports or feature requests will be redirected to the upstream repository. {{< /admonition >}} -You can specify multiple `otelcol.processor.k8sattributes` components by giving them -different labels. +You can specify multiple `otelcol.processor.k8sattributes` components by giving them different labels. ## Usage @@ -39,10 +32,10 @@ otelcol.processor.k8sattributes "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- |--------------------------------------------|-----------------| -------- -`auth_type` | `string` | Authentication method when connecting to the Kubernetes API. | `serviceAccount` | no -`passthrough` | `bool` | Passthrough signals as-is, only adding a `k8s.pod.ip` resource attribute. | `false` | no +Name | Type | Description | Default | Required +--------------|----------|---------------------------------------------------------------------------|------------------|--------- +`auth_type` | `string` | Authentication method when connecting to the Kubernetes API. | `serviceAccount` | no +`passthrough` | `bool` | Passthrough signals as-is, only adding a `k8s.pod.ip` resource attribute. 
| `false` | no The supported values for `auth_type` are: * `none`: No authentication is required. @@ -65,19 +58,20 @@ you can configure the DaemonSet {{< param "PRODUCT_ROOT_NAME" >}}s with `passthr The following blocks are supported inside the definition of `otelcol.processor.k8sattributes`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -output | [output][] | Configures where to send received telemetry data. | yes -extract | [extract][] | Rules for extracting data from Kubernetes. | no -extract > annotation | [annotation][] | Creating resource attributes from Kubernetes annotations. | no -extract > label | [extract_label][] | Creating resource attributes from Kubernetes labels. | no -filter | [filter][] | Filters the data loaded from Kubernetes. | no -filter > field | [field][] | Filter pods by generic Kubernetes fields. | no -filter > label | [filter_label][] | Filter pods by Kubernetes labels. | no -pod_association | [pod_association][] | Rules to associate pod metadata with telemetry signals. | no -pod_association > source | [source][] | Source information to identify a pod. | no -exclude | [exclude][] | Exclude pods from being processed. | no -exclude > pod | [pod][] | Pod information. | no + +Hierarchy | Block | Description | Required +-------------------------|---------------------|-----------------------------------------------------------|--------- +output | [output][] | Configures where to send received telemetry data. | yes +extract | [extract][] | Rules for extracting data from Kubernetes. | no +extract > annotation | [annotation][] | Creating resource attributes from Kubernetes annotations. | no +extract > label | [extract_label][] | Creating resource attributes from Kubernetes labels. | no +filter | [filter][] | Filters the data loaded from Kubernetes. | no +filter > field | [field][] | Filter pods by generic Kubernetes fields. | no +filter > label | [filter_label][] | Filter pods by Kubernetes labels. | no +pod_association | [pod_association][] | Rules to associate pod metadata with telemetry signals. | no +pod_association > source | [source][] | Source information to identify a pod. | no +exclude | [exclude][] | Exclude pods from being processed. | no +exclude > pod | [pod][] | Pod information. | no The `>` symbol indicates deeper levels of nesting. For example, `extract > annotation` @@ -101,8 +95,8 @@ The `extract` block configures which metadata, annotations, and labels to extrac The following attributes are supported: -Name | Type | Description | Default | Required ----- |----------------|--------------------------------------|-------------| -------- +Name | Type | Description | Default | Required +-----------|----------------|--------------------------------------|-------------|--------- `metadata` | `list(string)` | Pre-configured metadata keys to add. | _See below_ | no The currently supported `metadata` keys are: @@ -143,13 +137,13 @@ By default, if `metadata` is not specified, the following fields are extracted a The `annotation` block configures how to extract Kubernetes annotations. -{{< docs/shared lookup="flow/reference/components/extract-field-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/extract-field-block.md" source="alloy" version="" >}} ### label block {#extract-label-block} The `label` block configures how to extract Kubernetes labels. 
-{{< docs/shared lookup="flow/reference/components/extract-field-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/extract-field-block.md" source="alloy" version="" >}} ### filter block @@ -157,10 +151,10 @@ The `filter` block configures which nodes to get data from and which fields and The following attributes are supported: -Name | Type | Description | Default | Required ----- |----------|-------------------------------------------------------------------------| ------- | -------- -`node` | `string` | Configures a Kubernetes node name or host name. | `""` | no -`namespace` | `string` | Filters all pods by the provided namespace. All other pods are ignored. | `""` | no +Name | Type | Description | Default | Required +------------|----------|-------------------------------------------------------------------------|---------|--------- +`node` | `string` | Configures a Kubernetes node name or host name. | `""` | no +`namespace` | `string` | Filters all pods by the provided namespace. All other pods are ignored. | `""` | no If `node` is specified, then any pods not running on the specified node will be ignored by `otelcol.processor.k8sattributes`. @@ -168,13 +162,13 @@ If `node` is specified, then any pods not running on the specified node will be The `field` block allows you to filter pods by generic Kubernetes fields. -{{< docs/shared lookup="flow/reference/components/field-filter-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/field-filter-block.md" source="alloy" version="" >}} ### label block {#filter-label-block} The `label` block allows you to filter pods by generic Kubernetes labels. -{{< docs/shared lookup="flow/reference/components/field-filter-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/field-filter-block.md" source="alloy" version="" >}} ### pod_association block @@ -215,10 +209,10 @@ pod to be associated with the telemetry signal. The following attributes are supported: -Name | Type | Description | Default | Required ----- |----------|----------------------------------------------------------------------------------| ------- | -------- -`from` | `string` | The association method. Currently supports `resource_attribute` and `connection` | | yes -`name` | `string` | Name represents extracted key name. For example, `ip`, `pod_uid`, `k8s.pod.ip` | | no +Name | Type | Description | Default | Required +-------|----------|----------------------------------------------------------------------------------|---------|--------- +`from` | `string` | The association method. Currently supports `resource_attribute` and `connection` | | yes +`name` | `string` | Name represents extracted key name. For example, `ip`, `pod_uid`, `k8s.pod.ip` | | no ### exclude block @@ -231,20 +225,20 @@ The `pod` block configures a pod to be excluded from the processor. 
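+As a hedged sketch of this block (the pod name and the downstream wiring are illustrative, and the nesting follows the hierarchy table above), excluding a single pod by name might look like:
+
+```river
+otelcol.processor.k8sattributes "default" {
+  // Skip processing for pods with this exact name.
+  exclude {
+    pod {
+      name = "jaeger-agent"
+    }
+  }
+
+  output {
+    traces = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+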
The following attributes are supported: -Name | Type | Description | Default | Required ----- |----------|---------------------| ------- | -------- -`name` | `string` | The name of the pod | | yes +Name | Type | Description | Default | Required +-------|----------|---------------------|---------|--------- +`name` | `string` | The name of the pod | | yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, logs, or traces). diff --git a/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md b/docs/sources/reference/components/otelcol.processor.memory_limiter.md similarity index 69% rename from docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md rename to docs/sources/reference/components/otelcol.processor.memory_limiter.md index a7c5a90ab3..9bf3ec2f9f 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md +++ b/docs/sources/reference/components/otelcol.processor.memory_limiter.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.memory_limiter/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.memory_limiter/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.memory_limiter/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.memory_limiter/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.memory_limiter/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.memory_limiter/ description: Learn about otelcol.processor.memory_limiter title: otelcol.processor.memory_limiter --- @@ -36,7 +31,7 @@ giving them different labels. ```river otelcol.processor.memory_limiter "LABEL" { check_interval = "1s" - + limit = "50MiB" // alternatively, set `limit_percentage` and `spike_limit_percentage` output { @@ -51,14 +46,13 @@ otelcol.processor.memory_limiter "LABEL" { `otelcol.processor.memory_limiter` supports the following arguments: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`check_interval` | `duration` | How often to check memory usage. | | yes -`limit` | `string` | Maximum amount of memory targeted to be allocated by the process heap. | `"0MiB"` | no -`spike_limit` | `string` | Maximum spike expected between the measurements of memory usage. | 20% of `limit` | no -`limit_percentage` | `int` | Maximum amount of total available memory targeted to be allocated by the process heap. | `0` | no -`spike_limit_percentage` |` int` | Maximum spike expected between the measurements of memory usage. 
| `0` | no
+Name | Type | Description | Default | Required
+-------------------------|------------|----------------------------------------------------------------------------------------|----------------|---------
+`check_interval` | `duration` | How often to check memory usage. | | yes
+`limit` | `string` | Maximum amount of memory targeted to be allocated by the process heap. | `"0MiB"` | no
+`spike_limit` | `string` | Maximum spike expected between the measurements of memory usage. | 20% of `limit` | no
+`limit_percentage` | `int` | Maximum amount of total available memory targeted to be allocated by the process heap. | `0` | no
+`spike_limit_percentage` | `int` | Maximum spike expected between the measurements of memory usage. | `0` | no

The arguments must define either `limit` or the `limit_percentage` and `spike_limit_percentage` pair, but not both.

@@ -79,22 +73,22 @@ The `limit` and `spike_limit` values must be larger than 1 MiB.

The following blocks are supported inside the definition of
`otelcol.processor.memory_limiter`:

-Hierarchy | Block | Description | Required
---------- | ----- | ----------- | --------
-output | [output][] | Configures where to send received telemetry data. | yes
+Hierarchy | Block | Description | Required
+----------|------------|---------------------------------------------------|---------
+output | [output][] | Configures where to send received telemetry data. | yes

[output]: #output-block

### output block

-{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}}

## Exported fields

The following fields are exported and can be referenced by other components:

-Name | Type | Description
---- | ---- | -----------
+Name | Type | Description
+--------|--------------------|-----------------------------------------------------------------
`input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. 
`input` accepts `otelcol.Consumer` data for any telemetry signal (metrics,
logs, or traces).

diff --git a/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md b/docs/sources/reference/components/otelcol.processor.probabilistic_sampler.md
similarity index 70%
rename from docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md
rename to docs/sources/reference/components/otelcol.processor.probabilistic_sampler.md
index 70dfbf8ba6..b221d276df 100644
--- a/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md
+++ b/docs/sources/reference/components/otelcol.processor.probabilistic_sampler.md
@@ -1,8 +1,5 @@
 ---
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.probabilistic_sampler/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.probabilistic_sampler/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.probabilistic_sampler/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.probabilistic_sampler/
 description: Learn about otelcol.processor.probabilistic_sampler
 labels:
   stage: experimental
@@ -11,18 +8,16 @@ title: otelcol.processor.probabilistic_sampler

 # otelcol.processor.probabilistic_sampler

-{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}}

 `otelcol.processor.probabilistic_sampler` accepts logs and traces data from other `otelcol` components and applies probabilistic sampling based on configuration options.

 {{< admonition type="note" >}}
-`otelcol.processor.probabilistic_sampler` is a wrapper over the upstream
-OpenTelemetry Collector Contrib `probabilistic_sampler` processor. If necessary,
-bug reports or feature requests will be redirected to the upstream repository.
+`otelcol.processor.probabilistic_sampler` is a wrapper over the upstream OpenTelemetry Collector Contrib `probabilistic_sampler` processor.
+If necessary, bug reports or feature requests will be redirected to the upstream repository.
 {{< /admonition >}}

-You can specify multiple `otelcol.processor.probabilistic_sampler` components by giving them
-different labels.
+You can specify multiple `otelcol.processor.probabilistic_sampler` components by giving them different labels.

 ## Usage

@@ -39,30 +34,29 @@ otelcol.processor.probabilistic_sampler "LABEL" {

 `otelcol.processor.probabilistic_sampler` supports the following arguments:

-Name | Type | Description | Default | Required
----- |-----------|----------------------------------------------------------------------------------------------------------------------|-------------| --------
-`hash_seed` | `uint32` | An integer used to compute the hash algorithm. | `0` | no
-`sampling_percentage` | `float32` | Percentage of traces or logs sampled. | `0` | no
-`attribute_source` | `string` | Defines where to look for the attribute in `from_attribute`. | `"traceID"` | no
-`from_attribute` | `string` | The name of a log record attribute used for sampling purposes. | `""` | no
-`sampling_priority` | `string` | The name of a log record attribute used to set a different sampling priority from the `sampling_percentage` setting. 
| `""` | no +Name | Type | Description | Default | Required +----------------------|-----------|----------------------------------------------------------------------------------------------------------------------|-------------|--------- +`hash_seed` | `uint32` | An integer used to compute the hash algorithm. | `0` | no +`sampling_percentage` | `float32` | Percentage of traces or logs sampled. | `0` | no +`attribute_source` | `string` | Defines where to look for the attribute in `from_attribute`. | `"traceID"` | no +`from_attribute` | `string` | The name of a log record attribute used for sampling purposes. | `""` | no +`sampling_priority` | `string` | The name of a log record attribute used to set a different sampling priority from the `sampling_percentage` setting. | `""` | no `hash_seed` determines an integer to compute the hash algorithm. This argument could be used for both traces and logs. When used for logs, it computes the hash of a log record. -For hashing to work, all collectors for a given tier, for example, behind the same load balancer, must have the same `hash_seed`. -It is also possible to leverage a different `hash_seed` at different collector tiers to support additional sampling requirements. +For hashing to work, all collectors for a given tier, for example, behind the same load balancer, must have the same `hash_seed`. +It is also possible to leverage a different `hash_seed` at different collector tiers to support additional sampling requirements. `sampling_percentage` determines the percentage at which traces or logs are sampled. All traces or logs are sampled if you set this argument to a value greater than or equal to 100. -`attribute_source` (logs only) determines where to look for the attribute in `from_attribute`. The allowed values are `traceID` or `record`. +`attribute_source` (logs only) determines where to look for the attribute in `from_attribute`. The allowed values are `traceID` or `record`. `from_attribute` (logs only) determines the name of a log record attribute used for sampling purposes, such as a unique log record ID. The value of the attribute is only used if the trace ID is absent or if `attribute_source` is set to `record`. `sampling_priority` (logs only) determines the name of a log record attribute used to set a different sampling priority from the `sampling_percentage` setting. 0 means to never sample the log record, and greater than or equal to 100 means to always sample the log record. The `probabilistic_sampler` supports two types of sampling for traces: -1. `sampling.priority` [semantic - convention](https://github.com/opentracing/specification/blob/master/semantic_conventions.md#span-tags-table) as defined by OpenTracing. +1. `sampling.priority` [semantic convention](https://github.com/opentracing/specification/blob/master/semantic_conventions.md#span-tags-table) as defined by OpenTracing. 2. Trace ID hashing. The `sampling.priority` semantic convention takes priority over trace ID hashing. @@ -74,8 +68,8 @@ The `probabilistic_sampler` supports sampling logs according to their trace ID, The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. 
`input` accepts `otelcol.Consumer` OTLP-formatted data for any telemetry signal of these types: @@ -133,7 +127,7 @@ otelcol.processor.probabilistic_sampler "default" { } ``` -### Sample logs according to a "priority" attribute +### Sample logs according to a "priority" attribute ```river otelcol.processor.probabilistic_sampler "default" { diff --git a/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md b/docs/sources/reference/components/otelcol.processor.resourcedetection.md similarity index 95% rename from docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md rename to docs/sources/reference/components/otelcol.processor.resourcedetection.md index 2cc2224fa6..f28898eb09 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md +++ b/docs/sources/reference/components/otelcol.processor.resourcedetection.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.resourcedetection/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.resourcedetection/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.resourcedetection/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.resourcedetection/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.resourcedetection/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.resourcedetection/ labels: stage: beta title: otelcol.processor.resourcedetection @@ -13,11 +8,11 @@ description: Learn about otelcol.processor.resourcedetection # otelcol.processor.resourcedetection -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} -`otelcol.processor.resourcedetection` detects resource information from the host -in a format that conforms to the [OpenTelemetry resource semantic conventions](https://github.com/open-telemetry/opentelemetry-specification/tree/main/specification/resource/semantic_conventions/), and appends or -overrides the resource values in the telemetry data with this information. +`otelcol.processor.resourcedetection` detects resource information from the host in a format that conforms to the [OpenTelemetry resource semantic conventions][], and appends or overrides the resource values in the telemetry data with this information. + +[OpenTelemetry resource semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification/tree/main/specification/resource/semantic_conventions/ {{< admonition type="note" >}} `otelcol.processor.resourcedetection` is a wrapper over the upstream @@ -123,16 +118,15 @@ kubernetes_node | [kubernetes_node][] | [system]: #system [openshift]: #openshift [kubernetes_node]: #kubernetes_node - [res-attr-cfg]: #resource-attribute-config ### output -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ### ec2 -The `ec2` block reads resource information from the [EC2 instance metadata API] using the [AWS SDK for Go][]. +The `ec2` block reads resource information from the [EC2 instance metadata API][] using the [AWS SDK for Go][]. 
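+
+As an illustrative sketch only (the top-level `detectors` list argument is assumed from this component's arguments, and the label and wiring are hypothetical), enabling this detector might look like:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  // Ask the processor to query the EC2 instance metadata API.
+  detectors = ["ec2"]
+
+  output {
+    traces = [otelcol.exporter.otlp.default.input]
+  }
+}
+```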
The `ec2` block supports the following attributes:

@@ -155,9 +149,9 @@ To fetch EC2 tags, the IAM role assigned to the EC2 instance must have a policy

 The `ec2` block supports the following blocks:

-Block | Description | Required
----------------------------------------------- | ------------------------------------------------- | --------
-[resource_attributes](#ec2--resource_attributes) | Configures which resource attributes to add. | no
+Block | Description | Required
+-------------------------------------------------|----------------------------------------------|---------
+[resource_attributes](#ec2--resource_attributes) | Configures which resource attributes to add. | no

 #### ec2 > resource_attributes

@@ -177,7 +171,7 @@ Block | Description

 ### ecs

-The `ecs` block queries the Task Metadata Endpoint (TMDE) to record information about the current ECS Task. Only TMDE V4 and V3 are supported.
+The `ecs` block queries the [Task Metadata Endpoint][] (TMDE) to record information about the current ECS Task. Only TMDE V4 and V3 are supported.

 [Task Metadata Endpoint]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html

@@ -582,9 +576,7 @@ For more information, see the [Heroku cloud provider documentation][] under the

 The `system` block queries the host machine to retrieve various resource attributes.

 {{< admonition type="note" >}}
-
 Use the [Docker](#docker) detector if running {{< param "PRODUCT_ROOT_NAME" >}} as a Docker container.
-
 {{< /admonition >}}

 The `system` block supports the following attributes:

@@ -659,17 +651,17 @@ The determination of the API address, `ca_file`, and the service token is skippe

 The `openshift` block supports the following blocks:

-Block | Description | Required
----------------------------------------------- | ---------------------------------------------------- | --------
-[resource_attributes](#openshift--resource_attributes) | Configures which resource attributes to add. | no
-[tls](#openshift--tls) | TLS settings for the connection with the OpenShift API. | yes
+Block | Description | Required
+-------------------------------------------------------|---------------------------------------------------------|---------
+[resource_attributes](#openshift--resource_attributes) | Configures which resource attributes to add. | no
+[tls](#openshift--tls) | TLS settings for the connection with the OpenShift API. | yes

 #### openshift > tls

 The `tls` block configures TLS settings used for the connection to the OpenShift API.

-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}}

 #### openshift > resource_attributes

@@ -713,9 +705,9 @@ rules:

 The `kubernetes_node` block supports the following blocks:

-Block | Description | Required
----------------------------------------------- | ------------------------------------------------- | --------
-[resource_attributes](#kubernetes_node--resource_attributes) | Configures which resource attributes to add. | no
+Block | Description | Required
+-------------------------------------------------------------|----------------------------------------------|---------
+[resource_attributes](#kubernetes_node--resource_attributes) | Configures which resource attributes to add. 
| no #### kubernetes_node > resource_attributes diff --git a/docs/sources/flow/reference/components/otelcol.processor.span.md b/docs/sources/reference/components/otelcol.processor.span.md similarity index 65% rename from docs/sources/flow/reference/components/otelcol.processor.span.md rename to docs/sources/reference/components/otelcol.processor.span.md index 71c7357fec..7709b5b180 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.span.md +++ b/docs/sources/reference/components/otelcol.processor.span.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.span/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.span/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.span/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.span/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.span/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.span/ description: Learn about otelcol.processor.span labels: stage: experimental @@ -13,19 +8,17 @@ title: otelcol.processor.span # otelcol.processor.span -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} -`otelcol.processor.span` accepts traces telemetry data from other `otelcol` -components and modifies the names and attributes of the spans. -It also supports the ability to filter input data to determine if -it should be included or excluded from this processor. +`otelcol.processor.span` accepts traces telemetry data from other `otelcol` components and modifies the names and attributes of the spans. +It also supports the ability to filter input data to determine if it should be included or excluded from this processor. -> **NOTE**: `otelcol.processor.span` is a wrapper over the upstream -> OpenTelemetry Collector `span` processor. Bug reports or feature requests -> will be redirected to the upstream repository, if necessary. +{{< admonition type="note" >}} +`otelcol.processor.span` is a wrapper over the upstream OpenTelemetry Collector `span` processor. +Bug reports or feature requests will be redirected to the upstream repository, if necessary. +{{< /admonition >}} -You can specify multiple `otelcol.processor.span` components by giving them -different labels. +You can specify multiple `otelcol.processor.span` components by giving them different labels. ## Usage @@ -39,29 +32,29 @@ otelcol.processor.span "LABEL" { ## Arguments -`otelcol.processor.span` doesn't support any arguments and is configured fully -through inner blocks. +`otelcol.processor.span` doesn't support any arguments and is configured fully through inner blocks. ## Blocks The following blocks are supported inside the definition of `otelcol.processor.span`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -output | [output][] | Configures where to send received telemetry data. | yes -name | [name][] | Configures how to rename a span and add attributes. | no -name > to_attributes | [to-attributes][] | Configuration to create attributes from a span name. | no -status | [status][] | Specifies a status which should be set for this span. | no -include | [include][] | Filter for data included in this processor's actions. 
| no
-include > regexp | [regexp][] | Regex cache settings. | no
-include > attribute | [attribute][] | A list of attributes to match against. | no
-include > resource | [resource][] | A list of items to match the resources against. | no
-include > library | [library][] | A list of items to match the implementation library against. | no
-exclude | [exclude][] | Filter for data excluded from this processor's actions | no
-exclude > regexp | [regexp][] | Regex cache settings. | no
-exclude > attribute | [attribute][] | A list of attributes to match against. | no
-exclude > resource | [resource][] | A list of items to match the resources against. | no
-exclude > library | [library][] | A list of items to match the implementation library against. | no
+
+Hierarchy | Block | Description | Required
+---------------------|-------------------|--------------------------------------------------------------|---------
+output | [output][] | Configures where to send received telemetry data. | yes
+name | [name][] | Configures how to rename a span and add attributes. | no
+name > to_attributes | [to-attributes][] | Configuration to create attributes from a span name. | no
+status | [status][] | Specifies a status which should be set for this span. | no
+include | [include][] | Filter for data included in this processor's actions. | no
+include > regexp | [regexp][] | Regex cache settings. | no
+include > attribute | [attribute][] | A list of attributes to match against. | no
+include > resource | [resource][] | A list of items to match the resources against. | no
+include > library | [library][] | A list of items to match the implementation library against. | no
+exclude | [exclude][] | Filter for data excluded from this processor's actions. | no
+exclude > regexp | [regexp][] | Regex cache settings. | no
+exclude > attribute | [attribute][] | A list of attributes to match against. | no
+exclude > resource | [resource][] | A list of items to match the resources against. | no
+exclude > library | [library][] | A list of items to match the implementation library against. | no

 The `>` symbol indicates deeper levels of nesting. For example, `include > attribute`
 refers to an `attribute` block defined inside an `include` block.
@@ -81,13 +74,13 @@ If both an `include` block and an `exclude` block are specified, the `include` pr

 ### name block

-The `name` block configures how to rename a span and add attributes. 
+The `name` block configures how to rename a span and add attributes.

 The following attributes are supported:

-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`from_attributes` | `list(string)` | Attribute keys to pull values from, to generate a new span name. | `[]` | no
+Name | Type | Description | Default | Required
+------------------|----------------|------------------------------------------------------------------|---------|---------
+`from_attributes` | `list(string)` | Attribute keys to pull values from, to generate a new span name. | `[]` | no
 `separator` | `string` | Separates attribute values in the new span name. | `""` | no

 First, the `from_attributes` rules are applied, then the [to-attributes][] rules are applied.
@@ -111,10 +104,10 @@ The `to_attributes` block configures how to create attributes from a span name.

 The following attributes are supported:

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`rules` | `list(string)` | A list of regex rules to extract attribute values from span name. 
| | yes -`break_after_match` | `bool` | Configures if processing of rules should stop after the first match. | `false` | no +Name | Type | Description | Default | Required +--------------------|----------------|----------------------------------------------------------------------|---------|--------- +`rules` | `list(string)` | A list of regex rules to extract attribute values from span name. | | yes +`break_after_match` | `bool` | Configures if processing of rules should stop after the first match. | `false` | no Each rule in the `rules` list is a regex pattern string. 1. The span name is checked against each regex in the list. @@ -135,10 +128,10 @@ The `status` block specifies a status which should be set for this span. The following attributes are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`code` | `string` | A status code. | | yes -`description` | `string` | An optional field documenting Error status codes. | `""` | no +Name | Type | Description | Default | Required +--------------|----------|---------------------------------------------------|---------|--------- +`code` | `string` | A status code. | | yes +`description` | `string` | An optional field documenting Error status codes. | `""` | no The supported values for `code` are: * `Ok` @@ -154,12 +147,12 @@ The `include` block provides an option to include data being fed into the The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`match_type` | `string` | Controls how items to match against are interpreted. | | yes -`services` | `list(string)` | A list of items to match the service name against. | `[]` | no -`span_names` | `list(string)` | A list of items to match the span name against. | `[]` | no -`span_kinds` | `list(string)` | A list of items to match the span kind against. | `[]` | no +Name | Type | Description | Default | Required +-------------|----------------|------------------------------------------------------|---------|--------- +`match_type` | `string` | Controls how items to match against are interpreted. | | yes +`services` | `list(string)` | A list of items to match the service name against. | `[]` | no +`span_names` | `list(string)` | A list of items to match the span name against. | `[]` | no +`span_kinds` | `list(string)` | A list of items to match the span kind against. | `[]` | no `match_type` is required and must be set to either `"regexp"` or `"strict"`. @@ -175,12 +168,12 @@ The `exclude` block provides an option to exclude data from being fed into the The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`match_type` | `string` | Controls how items to match against are interpreted. | | yes -`services` | `list(string)` | A list of items to match the service name against. | `[]` | no -`span_names` | `list(string)` | A list of items to match the span name against. | `[]` | no -`span_kinds` | `list(string)` | A list of items to match the span kind against. | `[]` | no +Name | Type | Description | Default | Required +-------------|----------------|------------------------------------------------------|---------|--------- +`match_type` | `string` | Controls how items to match against are interpreted. | | yes +`services` | `list(string)` | A list of items to match the service name against. | `[]` | no +`span_names` | `list(string)` | A list of items to match the span name against. 
| `[]` | no +`span_kinds` | `list(string)` | A list of items to match the span kind against. | `[]` | no `match_type` is required and must be set to either `"regexp"` or `"strict"`. @@ -191,30 +184,30 @@ with a non-empty value for a valid configuration. ### regexp block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-regexp-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-regexp-block.md" source="alloy" version="" >}} ### attribute block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-attribute-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-attribute-block.md" source="alloy" version="" >}} ### resource block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-resource-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-resource-block.md" source="alloy" version="" >}} ### library block -{{< docs/shared lookup="flow/reference/components/otelcol-filter-library-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-filter-library-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block-traces.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block-traces.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` OTLP-formatted data for traces telemetry signals. diff --git a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md b/docs/sources/reference/components/otelcol.processor.tail_sampling.md similarity index 80% rename from docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md rename to docs/sources/reference/components/otelcol.processor.tail_sampling.md index 32ff9ac4f7..c27fae1098 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md +++ b/docs/sources/reference/components/otelcol.processor.tail_sampling.md @@ -4,7 +4,7 @@ aliases: - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.tail_sampling/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.tail_sampling/ - /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.tail_sampling/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.tail_sampling/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.tail_sampling/ description: Learn about otelcol.processor.tail_sampling labels: stage: beta @@ -13,7 +13,7 @@ title: otelcol.processor.tail_sampling # otelcol.processor.tail_sampling -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `otelcol.processor.tail_sampling` samples traces based on a set of defined policies. 
All spans for a given trace *must* be received by the same collector @@ -53,11 +53,11 @@ otelcol.processor.tail_sampling "LABEL" { `otelcol.processor.tail_sampling` supports the following arguments: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- +Name | Type | Description | Default | Required +------------------------------|------------|------------------------------------------------------------------------------|---------|--------- `decision_wait` | `duration` | Wait time since the first span of a trace before making a sampling decision. | `"30s"` | no -`num_traces` | `int` | Number of traces kept in memory. | `50000` | no -`expected_new_traces_per_sec` | `int` | Expected number of new traces (helps in allocating data structures). | `0` | no +`num_traces` | `int` | Number of traces kept in memory. | `50000` | no +`expected_new_traces_per_sec` | `int` | Expected number of new traces (helps in allocating data structures). | `0` | no `decision_wait` determines the number of batches to maintain on a channel. Its value must convert to a number of seconds greater than zero. @@ -125,7 +125,7 @@ output | [output] [] | Co [composite]: #composite-block [composite_sub_policy]: #composite_sub_policy-block [output]: #output-block -[otelcol.exporter.otlp]: {{< relref "./otelcol.exporter.otlp.md" >}} +[otelcol.exporter.otlp]: ../otelcol.exporter.otlp/ ### policy block @@ -133,10 +133,10 @@ The `policy` block configures a sampling policy used by the component. At least The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`name` | `string` | The custom name given to the policy. | | yes -`type` | `string` | The valid policy type for this policy. | | yes +Name | Type | Description | Default | Required +-------|----------|----------------------------------------|---------|--------- +`name` | `string` | The custom name given to the policy. | | yes +`type` | `string` | The valid policy type for this policy. | | yes Each policy results in a decision, and the processor evaluates them to make a final decision: @@ -153,9 +153,9 @@ The `latency` block configures a policy of type `latency`. The policy samples ba The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`threshold_ms` | `number` | The latency threshold for sampling, in milliseconds. | | yes +Name | Type | Description | Default | Required +---------------|----------|------------------------------------------------------|---------|--------- +`threshold_ms` | `number` | The latency threshold for sampling, in milliseconds. | | yes ### numeric_attribute block @@ -163,11 +163,11 @@ The `numeric_attribute` block configures a policy of type `numeric_attribute`. T The following arguments are supported: -Name | Type | Description | Default | Required ----- | ------- | ----------- | ------- | -------- -`key` | `string` | Tag that the filter is matched against. | | yes -`min_value` | `number` | The minimum value of the attribute to be considered a match. | | yes -`max_value` | `number` | The maximum value of the attribute to be considered a match. | | yes +Name | Type | Description | Default | Required +---------------|----------|----------------------------------------------------------------|---------|--------- +`key` | `string` | Tag that the filter is matched against. 
| | yes +`min_value` | `number` | The minimum value of the attribute to be considered a match. | | yes +`max_value` | `number` | The maximum value of the attribute to be considered a match. | | yes `invert_match` | `bool` | Indicates that values must not match against attribute values. | `false` | no ### probabilistic block @@ -176,10 +176,10 @@ The `probabilistic` block configures a policy of type `probabilistic`. The polic The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`sampling_percentage` | `number` | The percentage rate at which traces are sampled. | | yes -`hash_salt` | `string` | See below. | | no +Name | Type | Description | Default | Required +----------------------|----------|--------------------------------------------------|---------|--------- +`sampling_percentage` | `number` | The percentage rate at which traces are sampled. | | yes +`hash_salt` | `string` | See below. | | no Use `hash_salt` to configure the hashing salts. This is important in scenarios where multiple layers of collectors have different sampling rates. If multiple collectors use the same salt with different sampling rates, passing one @@ -191,9 +191,9 @@ The `status_code` block configures a policy of type `status_code`. The policy sa The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`status_codes` | `list(string)` | Holds the configurable settings to create a status code filter sampling policy evaluator. | | yes +Name | Type | Description | Default | Required +---------------|----------------|-------------------------------------------------------------------------------------------|---------|--------- +`status_codes` | `list(string)` | Holds the configurable settings to create a status code filter sampling policy evaluator. | | yes `status_codes` values must be "OK", "ERROR" or "UNSET". @@ -217,9 +217,9 @@ The `rate_limiting` block configures a policy of type `rate_limiting`. The polic The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`spans_per_second` | `number` | Sets the maximum number of spans that can be processed each second. | | yes +Name | Type | Description | Default | Required +-------------------|----------|---------------------------------------------------------------------|---------|--------- +`spans_per_second` | `number` | Sets the maximum number of spans that can be processed each second. | | yes ### span_count block @@ -227,24 +227,24 @@ The `span_count` block configures a policy of type `span_count`. The policy samp The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`min_spans` | `number` | Minimum number of spans in a trace. | | yes -`max_spans` | `number` | Maximum number of spans in a trace. | `0` | no +Name | Type | Description | Default | Required +------------|----------|-------------------------------------|---------|--------- +`min_spans` | `number` | Minimum number of spans in a trace. | | yes +`max_spans` | `number` | Maximum number of spans in a trace. | `0` | no Set `max_spans` to `0`, if you do not want to limit the policy samples based on the maximum number of spans in a trace. ### boolean_attribute block -The `boolean_attribute` block configures a policy of type `boolean_attribute`. 
+The `boolean_attribute` block configures a policy of type `boolean_attribute`. The policy samples based on a boolean attribute (resource and record). The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`key` | `string` | Attribute key to match against. | | yes -`value` | `bool` | The bool value (`true` or `false`) to use when matching against attribute values. | | yes +Name | Type | Description | Default | Required +--------|----------|-----------------------------------------------------------------------------------|---------|--------- +`key` | `string` | Attribute key to match against. | | yes +`value` | `bool` | The bool value (`true` or `false`) to use when matching against attribute values. | | yes ### ottl_condition block @@ -253,11 +253,11 @@ The `ottl_condition` block configures a policy of type `ottl_condition`. The pol The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`error_mode` | `string` | Error handling if OTTL conditions fail to evaluate. | | yes -`span` | `list(string)` | OTTL conditions for spans. | `[]` | no -`spanevent` | `list(string)` | OTTL conditions for span events. | `[]` | no +Name | Type | Description | Default | Required +-------------|----------------|-----------------------------------------------------|---------|--------- +`error_mode` | `string` | Error handling if OTTL conditions fail to evaluate. | | yes +`span` | `list(string)` | OTTL conditions for spans. | `[]` | no +`spanevent` | `list(string)` | OTTL conditions for span events. | `[]` | no The supported values for `error_mode` are: * `ignore`: Errors cause evaluation to continue to the next statement. @@ -271,10 +271,10 @@ The `trace_state` block configures a policy of type `trace_state`. The policy sa The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`key` | `string` | Tag that the filter is matched against. | | yes -`values` | `list(string)` | Set of values to use when matching against trace_state values. | | yes +Name | Type | Description | Default | Required +---------|----------------|----------------------------------------------------------------|---------|--------- +`key` | `string` | Tag that the filter is matched against. | | yes +`values` | `list(string)` | Set of values to use when matching against trace_state values. | | yes ### and block @@ -286,10 +286,10 @@ The `and_sub_policy` block configures a sampling policy used by the `and` block. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`name` | `string` | The custom name given to the policy. | | yes -`type` | `string` | The valid policy type for this policy. | | yes +Name | Type | Description | Default | Required +-------|----------|----------------------------------------|---------|--------- +`name` | `string` | The custom name given to the policy. | | yes +`type` | `string` | The valid policy type for this policy. | | yes ### composite block @@ -305,21 +305,21 @@ The `composite_sub_policy` block configures a sampling policy used by the `compo The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`name` | `string` | The custom name given to the policy. 
| | yes -`type` | `string` | The valid policy type for this policy. | | yes +Name | Type | Description | Default | Required +-------|----------|----------------------------------------|---------|--------- +`name` | `string` | The custom name given to the policy. | | yes +`type` | `string` | The valid policy type for this policy. | | yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields The following fields are exported and can be referenced by other components: -Name | Type | Description ----- | ---- | ----------- +Name | Type | Description +--------|--------------------|----------------------------------------------------------------- `input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to. `input` accepts `otelcol.Consumer` data for any telemetry signal (metrics, diff --git a/docs/sources/flow/reference/components/otelcol.processor.transform.md b/docs/sources/reference/components/otelcol.processor.transform.md similarity index 94% rename from docs/sources/flow/reference/components/otelcol.processor.transform.md rename to docs/sources/reference/components/otelcol.processor.transform.md index 65e8bd5b6c..03dd8bfe3d 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.transform.md +++ b/docs/sources/reference/components/otelcol.processor.transform.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.transform/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.transform/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.transform/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.transform/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.transform/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.transform/ description: Learn about otelcol.processor.transform labels: stage: experimental @@ -13,7 +8,7 @@ title: otelcol.processor.transform # otelcol.processor.transform -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `otelcol.processor.transform` accepts telemetry data from other `otelcol` components and modifies it using the [OpenTelemetry Transformation Language (OTTL)][OTTL]. @@ -49,14 +44,14 @@ There are two ways of inputting strings in River configuration files: * Using backticks ([raw River strings][river-raw-strings]). No characters must be escaped. However, it's not possible to have backticks inside the string. -For example, the OTTL statement `set(description, "Sum") where type == "Sum"` can be written as: +For example, the OTTL statement `set(description, "Sum") where type == "Sum"` can be written as: * A normal River string: `"set(description, \"Sum\") where type == \"Sum\""`. * A raw River string: ``` `set(description, "Sum") where type == "Sum"` ```. Raw strings are generally more convenient for writing OTTL statements. 
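+
+As a sketch, here are both forms side by side in a `statements` list like the ones this component's `*_statements` blocks accept (the surrounding block is omitted):
+
+```river
+statements = [
+  // Normal River string: every literal " and \ must be escaped.
+  "set(description, \"Sum\") where type == \"Sum\"",
+  // Raw River string: no escaping needed, but backticks can't appear inside.
+  `set(description, "Sum") where type == "Sum"`
+]
+```
+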
-[river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}}
-[river-raw-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#raw-strings" >}}
+[river-strings]: ../../../concepts/config-language/expressions/types_and_values/#strings
+[river-raw-strings]: ../../../concepts/config-language/expressions/types_and_values/#raw-strings
{{< /admonition >}}

{{< admonition type="note" >}}
@@ -68,19 +63,19 @@ will be redirected to the upstream repository.

You can specify multiple `otelcol.processor.transform` components by giving them different labels.

{{< admonition type="warning" >}}
-`otelcol.processor.transform` allows you to modify all aspects of your telemetry. Some specific risks are given below,
-but this is not an exhaustive list. It is important to understand your data before using this processor.
+`otelcol.processor.transform` allows you to modify all aspects of your telemetry. Some specific risks are given below,
+but this is not an exhaustive list. It is important to understand your data before using this processor.

-- [Unsound Transformations][]: Transformations between metric data types are not defined in the [metrics data model][].
-To use these functions, you must understand the incoming data and know that it can be meaningfully converted
+- [Unsound Transformations][]: Transformations between metric data types are not defined in the [metrics data model][].
+To use these functions, you must understand the incoming data and know that it can be meaningfully converted
to a new metric data type or can be used to create new metrics.
-  - Although OTTL allows you to use the `set` function with `metric.data_type`,
+  - Although OTTL allows you to use the `set` function with `metric.data_type`,
  its implementation in the transform processor is a [no-op][].
  To modify a data type, you must use a specific function such as `convert_gauge_to_sum`.
- [Identity Conflict][]: Transformation of metrics can potentially affect a metric's identity,
-  leading to an Identity Crisis. Be especially cautious when transforming a metric name and when reducing or changing
+  leading to an Identity Crisis. Be especially cautious when transforming a metric name and when reducing or changing
existing attributes. Adding new attributes is safe.
-- [Orphaned Telemetry][]: The processor allows you to modify `span_id`, `trace_id`, and `parent_span_id` for traces
-and `span_id`, and `trace_id` logs. Modifying these fields could lead to orphaned spans or logs.
+- [Orphaned Telemetry][]: The processor allows you to modify `span_id`, `trace_id`, and `parent_span_id` for traces
+and `span_id` and `trace_id` for logs. Modifying these fields could lead to orphaned spans or logs.

[Unsound Transformations]: https://github.com/open-telemetry/opentelemetry-collector/blob/{{< param "OTEL_VERSION" >}}/docs/standard-warnings.md#unsound-transformations
@@ -251,7 +246,7 @@ span using the `span` context, it is more efficient to use the `resource` contex

### output block

-{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}}

## Exported fields

@@ -566,8 +561,8 @@ Each statement is enclosed in backticks instead of quotation marks.
This constitutes a [raw string][river-raw-strings], and lets us avoid the need to escape
each `"` with a `\"`, and each `\` with a `\\` inside a [normal][river-strings] River string.
-[river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}} -[river-raw-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#raw-strings" >}} +[river-strings]: ../../../concepts/config-language/expressions/types_and_values/#strings +[river-raw-strings]: ../../../concepts/config-language/expressions/types_and_values/#raw-strings [traces protobuf]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.0.0/opentelemetry/proto/trace/v1/trace.proto [metrics protobuf]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.0.0/opentelemetry/proto/metrics/v1/metrics.proto @@ -590,6 +585,7 @@ each `"` with a `\"`, and each `\` with a `\\` inside a [normal][river-strings] [OTTL metric context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottlmetric/README.md [OTTL datapoint context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottldatapoint/README.md [OTTL log context]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/contexts/ottllog/README.md + ## Compatible components diff --git a/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md b/docs/sources/reference/components/otelcol.receiver.jaeger.md similarity index 92% rename from docs/sources/flow/reference/components/otelcol.receiver.jaeger.md rename to docs/sources/reference/components/otelcol.receiver.jaeger.md index a77bc58c37..0d34dc1ed3 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md +++ b/docs/sources/reference/components/otelcol.receiver.jaeger.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.jaeger/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.jaeger/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.jaeger/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.jaeger/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.jaeger/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.jaeger/ description: Learn about otelcol.receiver.jaeger title: otelcol.receiver.jaeger --- @@ -115,7 +110,7 @@ Name | Type | Description | Default | Required The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. 
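+
+For example, a minimal sketch that terminates TLS on the gRPC protocol (the certificate paths are placeholders):
+
+```river
+otelcol.receiver.jaeger "default" {
+  protocols {
+    grpc {
+      tls {
+        // Placeholder paths; point these at your server certificate and key.
+        cert_file = "/path/to/cert.pem"
+        key_file  = "/path/to/key.pem"
+      }
+    }
+  }
+
+  output {
+    traces = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+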
-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### keepalive block @@ -223,11 +218,11 @@ Name | Type | Description | Default | Required ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md b/docs/sources/reference/components/otelcol.receiver.kafka.md similarity index 93% rename from docs/sources/flow/reference/components/otelcol.receiver.kafka.md rename to docs/sources/reference/components/otelcol.receiver.kafka.md index a1bcf950de..312e2fe7ee 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md +++ b/docs/sources/reference/components/otelcol.receiver.kafka.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.kafka/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.kafka/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.kafka/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.kafka/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.kafka/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.kafka/ description: Learn about otelcol.receiver.kafka title: otelcol.receiver.kafka --- @@ -169,7 +164,7 @@ The `tls` block configures TLS settings used for connecting to the Kafka brokers. If the `tls` block isn't provided, TLS won't be used for communication. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### kerberos block @@ -279,11 +274,11 @@ Regular expressions are not allowed in the `headers` argument. 
Only exact matchi ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.loki.md b/docs/sources/reference/components/otelcol.receiver.loki.md similarity index 80% rename from docs/sources/flow/reference/components/otelcol.receiver.loki.md rename to docs/sources/reference/components/otelcol.receiver.loki.md index a658f35a7f..539e42e5cc 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.loki.md +++ b/docs/sources/reference/components/otelcol.receiver.loki.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.loki/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.loki/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.loki/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.loki/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.loki/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.loki/ description: Learn about otelcol.receiver.loki labels: stage: beta @@ -13,7 +8,7 @@ title: otelcol.receiver.loki # otelcol.receiver.loki -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `otelcol.receiver.loki` receives Loki log entries, converts them to the OpenTelemetry logs format, and forwards them to other `otelcol.*` components. @@ -49,7 +44,7 @@ output | [output][] | Configures where to send converted telemetry data. 
| yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md b/docs/sources/reference/components/otelcol.receiver.opencensus.md similarity index 89% rename from docs/sources/flow/reference/components/otelcol.receiver.opencensus.md rename to docs/sources/reference/components/otelcol.receiver.opencensus.md index 01db61e67b..bf78f52021 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md +++ b/docs/sources/reference/components/otelcol.receiver.opencensus.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.opencensus/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.opencensus/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.opencensus/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.opencensus/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.opencensus/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.opencensus/ description: Learn about otelcol.receiver.opencensus title: otelcol.receiver.opencensus --- @@ -91,7 +86,7 @@ refers to a `tls` block defined inside a `grpc` block. The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### keepalive block @@ -131,11 +126,11 @@ Name | Type | Description | Default | Required ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md b/docs/sources/reference/components/otelcol.receiver.otlp.md similarity index 92% rename from docs/sources/flow/reference/components/otelcol.receiver.otlp.md rename to docs/sources/reference/components/otelcol.receiver.otlp.md index 55bb0db345..251ec9d6f6 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md +++ b/docs/sources/reference/components/otelcol.receiver.otlp.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.otlp/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.otlp/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.otlp/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.otlp/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.otlp/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.otlp/ 
description: Learn about otelcol.receiver.otlp title: otelcol.receiver.otlp --- @@ -187,11 +182,11 @@ If `allowed_headers` includes `"*"`, all headers are permitted. ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md b/docs/sources/reference/components/otelcol.receiver.prometheus.md similarity index 80% rename from docs/sources/flow/reference/components/otelcol.receiver.prometheus.md rename to docs/sources/reference/components/otelcol.receiver.prometheus.md index ce9e9b9f89..79a2dfe11e 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md +++ b/docs/sources/reference/components/otelcol.receiver.prometheus.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.prometheus/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.prometheus/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.prometheus/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.prometheus/ description: Learn about otelcol.receiver.prometheus labels: stage: beta @@ -13,7 +8,7 @@ title: otelcol.receiver.prometheus # otelcol.receiver.prometheus -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `otelcol.receiver.prometheus` receives Prometheus metrics, converts them to the OpenTelemetry metrics format, and forwards them to other `otelcol.*` @@ -50,7 +45,7 @@ output | [output][] | Configures where to send received telemetry data. 
| yes ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md b/docs/sources/reference/components/otelcol.receiver.vcenter.md similarity index 92% rename from docs/sources/flow/reference/components/otelcol.receiver.vcenter.md rename to docs/sources/reference/components/otelcol.receiver.vcenter.md index a7f0f70ced..8694e7a85f 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md +++ b/docs/sources/reference/components/otelcol.receiver.vcenter.md @@ -1,9 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.vcenter/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.vcenter/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.vcenter/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.vcenter/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.vcenter/ title: otelcol.receiver.vcenter description: Learn about otelcol.receiver.vcenter labels: @@ -12,7 +8,7 @@ labels: # otelcol.receiver.vcenter -{{< docs/shared lookup="flow/stability/experimental.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/experimental.md" source="alloy" version="" >}} `otelcol.receiver.vcenter` accepts metrics from a vCenter or ESXi host running VMware vSphere APIs and @@ -94,7 +90,7 @@ output | [output][] | Configures where to send received telemetry data. | yes The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. 
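+
+A minimal sketch of the receiver for a lab environment. The endpoint and credentials are placeholders, and `insecure_skip_verify` is assumed to be available from the shared TLS settings below:
+
+```river
+otelcol.receiver.vcenter "default" {
+  endpoint = "https://vcenter.example.local"
+  username = "otel-user"
+  password = "REPLACE_ME"
+
+  tls {
+    // Assumed attribute from the shared TLS block; don't use in production.
+    insecure_skip_verify = true
+  }
+
+  output {
+    metrics = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+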
-{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### metrics block @@ -172,11 +168,11 @@ Name | Type | Description | Default | Required ### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md b/docs/sources/reference/components/otelcol.receiver.zipkin.md similarity index 84% rename from docs/sources/flow/reference/components/otelcol.receiver.zipkin.md rename to docs/sources/reference/components/otelcol.receiver.zipkin.md index 87ed3b6ced..205d33ab76 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md +++ b/docs/sources/reference/components/otelcol.receiver.zipkin.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/otelcol.receiver.zipkin/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.receiver.zipkin/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.receiver.zipkin/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.zipkin/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.receiver.zipkin/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.zipkin/ description: Learn about otelcol.receiver.zipkin title: otelcol.receiver.zipkin --- @@ -71,7 +66,7 @@ refers to a `tls` block defined inside a `grpc` block. The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. -{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-tls-config-block.md" source="alloy" version="" >}} ### cors block @@ -97,11 +92,11 @@ If `allowed_headers` includes `"*"`, all headers are permitted. 
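+
+For example, a sketch that restricts CORS to a single origin and one custom header (the values are placeholders):
+
+```river
+otelcol.receiver.zipkin "default" {
+  cors {
+    allowed_origins = ["https://ui.example.com"]
+    allowed_headers = ["X-Example-Header"]
+  }
+
+  output {
+    traces = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+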
### debug_metrics block -{{< docs/shared lookup="flow/reference/components/otelcol-debug-metrics-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/otelcol-debug-metrics-block.md" source="alloy" version="" >}} ### output block -{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/output-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/prometheus.exporter.apache.md b/docs/sources/reference/components/prometheus.exporter.apache.md similarity index 81% rename from docs/sources/flow/reference/components/prometheus.exporter.apache.md rename to docs/sources/reference/components/prometheus.exporter.apache.md index 5bbccf271d..4c9b9acc6b 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.apache.md +++ b/docs/sources/reference/components/prometheus.exporter.apache.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.apache/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.apache/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.apache/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.apache/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.apache/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.apache/ description: Learn about prometheus.exporter.apache title: prometheus.exporter.apache --- @@ -34,7 +29,7 @@ All arguments are optional. Omitted fields take their default values. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -86,7 +81,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.azure.md b/docs/sources/reference/components/prometheus.exporter.azure.md similarity index 95% rename from docs/sources/flow/reference/components/prometheus.exporter.azure.md rename to docs/sources/reference/components/prometheus.exporter.azure.md index 3c014f6919..d02385f94d 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.azure.md +++ b/docs/sources/reference/components/prometheus.exporter.azure.md @@ -1,17 +1,12 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.azure/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.azure/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.azure/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.azure/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.azure/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.azure/ description: Learn about prometheus.exporter.azure title: prometheus.exporter.azure --- # prometheus.exporter.azure -The `prometheus.exporter.azure` component embeds [`azure-metrics-exporter`](https://github.com/webdevops/azure-metrics-exporter) to collect metrics from [Azure Monitor](https://azure.microsoft.com/en-us/products/monitor). +The `prometheus.exporter.azure` component embeds [`azure-metrics-exporter`](https://github.com/webdevops/azure-metrics-exporter) to collect metrics from [Azure Monitor](https://azure.microsoft.com/en-us/products/monitor). The exporter supports all metrics defined by Azure Monitor. You can find the complete list of available metrics in the [Azure Monitor documentation](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported). Metrics for this integration are exposed with the template `azure_{type}_{metric}_{aggregation}_{unit}` by default. 
As an example, @@ -101,7 +96,7 @@ Valid values for `azure_cloud_environment` are `azurecloud`, `azurechinacloud`, ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health diff --git a/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md b/docs/sources/reference/components/prometheus.exporter.blackbox.md similarity index 90% rename from docs/sources/flow/reference/components/prometheus.exporter.blackbox.md rename to docs/sources/reference/components/prometheus.exporter.blackbox.md index 6fc8021d7b..39d02ef419 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md +++ b/docs/sources/reference/components/prometheus.exporter.blackbox.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.blackbox/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.blackbox/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.blackbox/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.blackbox/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.blackbox/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.blackbox/ description: Learn about prometheus.exporter.blackbox title: prometheus.exporter.blackbox --- @@ -74,7 +69,7 @@ Labels specified in the `labels` argument will not override labels set by `black ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -194,7 +189,7 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md b/docs/sources/reference/components/prometheus.exporter.cadvisor.md similarity index 88% rename from docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md rename to docs/sources/reference/components/prometheus.exporter.cadvisor.md index c40f951d9e..6f4ee0ba04 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md +++ b/docs/sources/reference/components/prometheus.exporter.cadvisor.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.cadvisor/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.cadvisor/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.cadvisor/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.cadvisor/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.cadvisor/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.cadvisor/ description: Learn about the prometheus.exporter.cadvisor title: prometheus.exporter.cadvisor --- @@ -71,7 +66,7 @@ fully through arguments. 
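+
+A minimal sketch that collects from the local Docker socket. The `docker_host` argument and its value are assumptions; check the arguments table for the exact name and default:
+
+```river
+prometheus.exporter.cadvisor "example" {
+  // Assumed argument; the default Docker socket path on Linux.
+  docker_host = "unix:///var/run/docker.sock"
+}
+```
+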
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -125,7 +120,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md b/docs/sources/reference/components/prometheus.exporter.cloudwatch.md similarity index 97% rename from docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md rename to docs/sources/reference/components/prometheus.exporter.cloudwatch.md index 4caae767f3..efcf9913f5 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md +++ b/docs/sources/reference/components/prometheus.exporter.cloudwatch.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.cloudwatch/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.cloudwatch/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.cloudwatch/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.cloudwatch/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.cloudwatch/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.cloudwatch/ description: Learn about prometheus.exporter.cloudwatch title: prometheus.exporter.cloudwatch --- @@ -341,7 +336,7 @@ This feature also prevents component scrape timeouts when you gather high volume ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health diff --git a/docs/sources/flow/reference/components/prometheus.exporter.consul.md b/docs/sources/reference/components/prometheus.exporter.consul.md similarity index 89% rename from docs/sources/flow/reference/components/prometheus.exporter.consul.md rename to docs/sources/reference/components/prometheus.exporter.consul.md index a8480208ed..bae1916106 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.consul.md +++ b/docs/sources/reference/components/prometheus.exporter.consul.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.consul/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.consul/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.consul/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.consul/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.consul/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.consul/ description: Learn about prometheus.exporter.consul title: prometheus.exporter.consul --- @@ -44,7 +39,7 @@ All arguments are optional. Omitted fields take their default values. 
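+
+A minimal sketch that points the exporter at a local Consul agent (`server` is an assumed argument name; the address matches Consul's default HTTP API port):
+
+```river
+prometheus.exporter.consul "example" {
+  // Assumed argument name; Consul's default HTTP API address.
+  server = "http://localhost:8500"
+}
+```
+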
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -96,7 +91,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md b/docs/sources/reference/components/prometheus.exporter.dnsmasq.md similarity index 81% rename from docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md rename to docs/sources/reference/components/prometheus.exporter.dnsmasq.md index 80fdd881ae..243bc03a15 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md +++ b/docs/sources/reference/components/prometheus.exporter.dnsmasq.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.dnsmasq/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.dnsmasq/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.dnsmasq/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.dnsmasq/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.dnsmasq/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.dnsmasq/ description: Learn about prometheus.exporter.dnsmasq title: prometheus.exporter.dnsmasq --- @@ -34,7 +29,7 @@ All arguments are optional. Omitted fields take their default values. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -86,7 +81,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md b/docs/sources/reference/components/prometheus.exporter.elasticsearch.md similarity index 89% rename from docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md rename to docs/sources/reference/components/prometheus.exporter.elasticsearch.md index 487ce82eab..147141f227 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md +++ b/docs/sources/reference/components/prometheus.exporter.elasticsearch.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.elasticsearch/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.elasticsearch/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.elasticsearch/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.elasticsearch/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.elasticsearch/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.elasticsearch/ description: Learn about prometheus.exporter.elasticsearch title: prometheus.exporter.elasticsearch --- @@ -69,11 +64,11 @@ The following blocks are supported inside the definition of ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -129,7 +124,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
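+
+If the cluster requires authentication, the `basic_auth` block documented above can be combined with the connection settings. A sketch with placeholder credentials (`address` is an assumed argument name for the Elasticsearch URL):
+
+```river
+prometheus.exporter.elasticsearch "example" {
+  // Assumed argument name; the Elasticsearch HTTP endpoint.
+  address = "http://localhost:9200"
+
+  basic_auth {
+    username = "admin"
+    password = "REPLACE_ME"
+  }
+}
+```
+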
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.gcp.md b/docs/sources/reference/components/prometheus.exporter.gcp.md similarity index 94% rename from docs/sources/flow/reference/components/prometheus.exporter.gcp.md rename to docs/sources/reference/components/prometheus.exporter.gcp.md index 017542a0a8..213c1b2b59 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.gcp.md +++ b/docs/sources/reference/components/prometheus.exporter.gcp.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.gcp/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.gcp/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.gcp/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.gcp/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.gcp/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.gcp/ description: Learn about prometheus.exporter.gcp title: prometheus.exporter.gcp --- @@ -82,7 +77,7 @@ For `ingest_delay`, you can see the values for this in documented metrics as `Af ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health diff --git a/docs/sources/flow/reference/components/prometheus.exporter.github.md b/docs/sources/reference/components/prometheus.exporter.github.md similarity index 84% rename from docs/sources/flow/reference/components/prometheus.exporter.github.md rename to docs/sources/reference/components/prometheus.exporter.github.md index 10b641a6e6..a803653ccd 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.github.md +++ b/docs/sources/reference/components/prometheus.exporter.github.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.github/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.github/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.github/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.github/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.github/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.github/ description: Learn about prometheus.exporter.github title: prometheus.exporter.github --- @@ -41,7 +36,7 @@ When provided, `api_token_file` takes precedence over `api_token`. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -94,7 +89,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.kafka.md b/docs/sources/reference/components/prometheus.exporter.kafka.md similarity index 92% rename from docs/sources/flow/reference/components/prometheus.exporter.kafka.md rename to docs/sources/reference/components/prometheus.exporter.kafka.md index 4dbd7c4c4c..643505c804 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.kafka.md +++ b/docs/sources/reference/components/prometheus.exporter.kafka.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.kafka/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.kafka/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.kafka/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.kafka/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.kafka/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.kafka/ description: Learn about prometheus.exporter.kafka title: prometheus.exporter.kafka --- @@ -54,7 +49,7 @@ Omitted fields take their default values. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -106,7 +101,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
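A minimal sketch of the "omitted fields take their default values" behavior noted above, assuming the component's `kafka_uris` argument. The broker address is a placeholder:

```river
prometheus.exporter.kafka "example" {
  // All other arguments keep their defaults.
  kafka_uris = ["localhost:9092"]
}
```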
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.memcached.md b/docs/sources/reference/components/prometheus.exporter.memcached.md similarity index 79% rename from docs/sources/flow/reference/components/prometheus.exporter.memcached.md rename to docs/sources/reference/components/prometheus.exporter.memcached.md index 8bf7d6e54f..f0c06223bb 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.memcached.md +++ b/docs/sources/reference/components/prometheus.exporter.memcached.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.memcached/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.memcached/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.memcached/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.memcached/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.memcached/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.memcached/ description: Learn about prometheus.exporter.memcached title: prometheus.exporter.memcached --- @@ -42,11 +37,11 @@ The following blocks are supported inside the definition of `prometheus.exporter ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -98,7 +93,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
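A minimal sketch combining the arguments with the shared `tls_config` block referenced above. The address, timeout, and CA path are illustrative assumptions:

```river
prometheus.exporter.memcached "example" {
  address = "localhost:11211"
  timeout = "5s"

  // Shared tls_config block, as referenced in the hunk above.
  tls_config {
    ca_file = "/etc/memcached/ca.pem"
  }
}
```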
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md b/docs/sources/reference/components/prometheus.exporter.mongodb.md similarity index 85% rename from docs/sources/flow/reference/components/prometheus.exporter.mongodb.md rename to docs/sources/reference/components/prometheus.exporter.mongodb.md index e6231dad9d..7de3495820 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md +++ b/docs/sources/reference/components/prometheus.exporter.mongodb.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.mongodb/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.mongodb/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.mongodb/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.mongodb/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.mongodb/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.mongodb/ description: Learn about prometheus.exporter.mongodb title: prometheus.exporter.mongodb --- @@ -46,7 +41,7 @@ For `tls_basic_auth_config_path`, check [`tls_config`](https://prometheus.io/doc ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -87,7 +82,7 @@ prometheus.remote_write "default" { } ``` -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md b/docs/sources/reference/components/prometheus.exporter.mssql.md similarity index 95% rename from docs/sources/flow/reference/components/prometheus.exporter.mssql.md rename to docs/sources/reference/components/prometheus.exporter.mssql.md index ef7e708591..ebaf00f475 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md +++ b/docs/sources/reference/components/prometheus.exporter.mssql.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.mssql/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.mssql/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.mssql/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.mssql/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.mssql/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.mssql/ description: Learn about prometheus.exporter.mssql title: prometheus.exporter.mssql --- @@ -75,7 +70,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -127,7 +122,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
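A minimal sketch of the connection this page configures, assuming a `connection_string` argument whose placeholders mirror the replacement list above:

```river
prometheus.exporter.mssql "example" {
  // Replace USERNAME, PASSWORD, HOST, and PORT for your instance.
  connection_string = "sqlserver://USERNAME:PASSWORD@HOST:PORT"
}
```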
-[scrape]: {{< relref "./prometheus.scrape.md" >}}
+[scrape]: ../prometheus.scrape/

## Custom metrics
You can use the optional `query_config` parameter to retrieve custom Prometheus metrics for an MSSQL instance.
diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mysql.md b/docs/sources/reference/components/prometheus.exporter.mysql.md
similarity index 95%
rename from docs/sources/flow/reference/components/prometheus.exporter.mysql.md
rename to docs/sources/reference/components/prometheus.exporter.mysql.md
index 14df71386a..f062888739 100644
--- a/docs/sources/flow/reference/components/prometheus.exporter.mysql.md
+++ b/docs/sources/reference/components/prometheus.exporter.mysql.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.mysql/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.mysql/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.mysql/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.mysql/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.mysql/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.mysql/
description: Learn about prometheus.exporter.mysql
title: prometheus.exporter.mysql
---
@@ -158,7 +153,7 @@ The full list of supported collectors is:

## Exported fields

-{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}}

## Component health

@@ -211,7 +206,7 @@ Replace the following:
- `USERNAME`: The username to use for authentication to the remote_write API.
- `PASSWORD`: The password to use for authentication to the remote_write API.

-[scrape]: {{< relref "./prometheus.scrape.md" >}}
+[scrape]: ../prometheus.scrape/
diff --git a/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md b/docs/sources/reference/components/prometheus.exporter.oracledb.md
similarity index 83%
rename from docs/sources/flow/reference/components/prometheus.exporter.oracledb.md
rename to docs/sources/reference/components/prometheus.exporter.oracledb.md
index a259a5bfae..ccad3484ec 100644
--- a/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md
+++ b/docs/sources/reference/components/prometheus.exporter.oracledb.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.oracledb/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.oracledb/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.oracledb/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.oracledb/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.oracledb/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.oracledb/
description: Learn about prometheus.exporter.oracledb
title: prometheus.exporter.oracledb
---
@@ -47,7 +42,7 @@ fully through arguments.
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -99,7 +94,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md b/docs/sources/reference/components/prometheus.exporter.postgres.md similarity index 92% rename from docs/sources/flow/reference/components/prometheus.exporter.postgres.md rename to docs/sources/reference/components/prometheus.exporter.postgres.md index d5f6cc78ea..5c12182702 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md +++ b/docs/sources/reference/components/prometheus.exporter.postgres.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.postgres/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.postgres/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.postgres/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.postgres/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.postgres/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.postgres/ description: Learn about prometheus.exporter.postgres labels: stage: beta @@ -72,7 +67,7 @@ If `autodiscovery` is disabled, neither `database_allowlist` nor `database_denyl ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -212,7 +207,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
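A minimal sketch for the Postgres page above, assuming the component's `data_source_names` argument. The connection string is a placeholder:

```river
prometheus.exporter.postgres "example" {
  // One or more PostgreSQL connection strings.
  data_source_names = ["postgresql://USERNAME:PASSWORD@localhost:5432/postgres?sslmode=disable"]
}
```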
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.process.md b/docs/sources/reference/components/prometheus.exporter.process.md similarity index 90% rename from docs/sources/flow/reference/components/prometheus.exporter.process.md rename to docs/sources/reference/components/prometheus.exporter.process.md index 2ece4bfb96..bc38cae844 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.process.md +++ b/docs/sources/reference/components/prometheus.exporter.process.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.process/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.process/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.process/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.process/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.process/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.process/ description: Learn about prometheus.exporter.process title: prometheus.exporter.process --- @@ -76,7 +71,7 @@ Each regex in `cmdline` must match the corresponding argv for the process to be ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -132,7 +127,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
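A minimal sketch of the process matching described above. The `matcher` block contents are illustrative assumptions:

```river
prometheus.exporter.process "example" {
  // Only processes whose comm value matches are exported.
  matcher {
    comm = ["alloy"]
  }
}
```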
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.redis.md b/docs/sources/reference/components/prometheus.exporter.redis.md similarity index 93% rename from docs/sources/flow/reference/components/prometheus.exporter.redis.md rename to docs/sources/reference/components/prometheus.exporter.redis.md index 93cc839aeb..0f02e4f3d4 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.redis.md +++ b/docs/sources/reference/components/prometheus.exporter.redis.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.redis/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.redis/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.redis/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.redis/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.redis/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.redis/ description: Learn about prometheus.exporter.redis title: prometheus.exporter.redis --- @@ -78,7 +73,7 @@ Note that setting `export_client_port` increases the cardinality of all Redis me ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -130,7 +125,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.self.md b/docs/sources/reference/components/prometheus.exporter.self.md similarity index 80% rename from docs/sources/flow/reference/components/prometheus.exporter.self.md rename to docs/sources/reference/components/prometheus.exporter.self.md index 42970e3214..0b700825ed 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.self.md +++ b/docs/sources/reference/components/prometheus.exporter.self.md @@ -1,9 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.agent/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.agent/ -- ./prometheus.exporter.agent/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.self/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.self/ description: Learn about prometheus.exporter.self title: prometheus.exporter.self --- @@ -25,7 +21,7 @@ prometheus.exporter.self "agent" { ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -72,8 +68,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
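For the `prometheus.exporter.self` page above, a self-contained sketch that scrapes the collector's own metrics. The remote_write URL is a placeholder:

```river
prometheus.exporter.self "example" { }

prometheus.scrape "demo" {
  targets    = prometheus.exporter.self.example.targets
  forward_to = [prometheus.remote_write.default.receiver]
}

prometheus.remote_write "default" {
  endpoint {
    url = "REMOTE_WRITE_URL"
  }
}
```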
-[scrape]: {{< relref "./prometheus.scrape.md" >}} - +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.snmp.md b/docs/sources/reference/components/prometheus.exporter.snmp.md similarity index 91% rename from docs/sources/flow/reference/components/prometheus.exporter.snmp.md rename to docs/sources/reference/components/prometheus.exporter.snmp.md index 2773809724..d910dd3018 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.snmp.md +++ b/docs/sources/reference/components/prometheus.exporter.snmp.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.snmp/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.snmp/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.snmp/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.snmp/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.snmp/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.snmp/ description: Learn about prometheus.exporter.snmp title: prometheus.exporter.snmp --- @@ -89,7 +84,7 @@ The `walk_param` block may be specified multiple times to define multiple SNMP c ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -198,7 +193,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md b/docs/sources/reference/components/prometheus.exporter.snowflake.md similarity index 83% rename from docs/sources/flow/reference/components/prometheus.exporter.snowflake.md rename to docs/sources/reference/components/prometheus.exporter.snowflake.md index c0b0758260..30e676e9f4 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md +++ b/docs/sources/reference/components/prometheus.exporter.snowflake.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.snowflake/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.snowflake/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.snowflake/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.snowflake/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.snowflake/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.snowflake/ description: Learn about prometheus.exporter.snowflake title: prometheus.exporter.snowflake --- @@ -45,7 +40,7 @@ fully through arguments. 
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -100,7 +95,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.squid.md b/docs/sources/reference/components/prometheus.exporter.squid.md similarity index 81% rename from docs/sources/flow/reference/components/prometheus.exporter.squid.md rename to docs/sources/reference/components/prometheus.exporter.squid.md index 44df648863..ab85cccb78 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.squid.md +++ b/docs/sources/reference/components/prometheus.exporter.squid.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.squid/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.squid/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.squid/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.squid/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.squid/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.squid/ description: Learn about prometheus.exporter.squid title: prometheus.exporter.squid --- @@ -40,7 +35,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -92,7 +87,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
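A minimal sketch for the Squid page above, assuming an `address` argument. The proxy address is a placeholder:

```river
prometheus.exporter.squid "example" {
  // Address of the Squid proxy to collect metrics from.
  address = "localhost:3128"
}
```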
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.statsd.md b/docs/sources/reference/components/prometheus.exporter.statsd.md similarity index 90% rename from docs/sources/flow/reference/components/prometheus.exporter.statsd.md rename to docs/sources/reference/components/prometheus.exporter.statsd.md index 40eb9e4eda..799ec989c6 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.statsd.md +++ b/docs/sources/reference/components/prometheus.exporter.statsd.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.statsd/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.statsd/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.statsd/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.statsd/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.statsd/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.statsd/ description: Learn about prometheus.exporter.statsd title: prometheus.exporter.statsd --- @@ -59,7 +54,7 @@ fully through arguments. ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -125,7 +120,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.unix.md b/docs/sources/reference/components/prometheus.exporter.unix.md similarity index 98% rename from docs/sources/flow/reference/components/prometheus.exporter.unix.md rename to docs/sources/reference/components/prometheus.exporter.unix.md index 46f4f64e9b..1d322aced9 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.unix.md +++ b/docs/sources/reference/components/prometheus.exporter.unix.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.unix/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.unix/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.unix/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.unix/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.unix/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.unix/ description: Learn about prometheus.exporter.unix title: prometheus.exporter.unix --- @@ -262,7 +257,7 @@ An explicit value in the block takes precedence over the environment variable. 
## Exported fields

-{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}}

## Component health

@@ -408,7 +403,7 @@ Replace the following:
- `USERNAME`: The username to use for authentication to the remote_write API.
- `PASSWORD`: The password to use for authentication to the remote_write API.

-[scrape]: {{< relref "./prometheus.scrape.md" >}}
+[scrape]: ../prometheus.scrape/
diff --git a/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md b/docs/sources/reference/components/prometheus.exporter.vsphere.md
similarity index 80%
rename from docs/sources/flow/reference/components/prometheus.exporter.vsphere.md
rename to docs/sources/reference/components/prometheus.exporter.vsphere.md
index 558eff9f90..869cc61a75 100644
--- a/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md
+++ b/docs/sources/reference/components/prometheus.exporter.vsphere.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.vsphere/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.vsphere/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.vsphere/
-- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.vsphere/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.vsphere/
+canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.vsphere/
title: prometheus.exporter.vsphere
description: Learn about prometheus.exporter.vsphere
---
@@ -13,10 +8,11 @@ description: Learn about prometheus.exporter.vsphere
The `prometheus.exporter.vsphere` component embeds [`vmware_exporter`](https://github.com/grafana/vmware_exporter) to collect vSphere metrics

-> **NOTE**: We recommend to use [otelcol.receiver.vcenter][] instead.
-
-[otelcol.receiver.vcenter]: {{< relref "./otelcol.receiver.vcenter.md" >}}
+{{< admonition type="note" >}}
+We recommend using [otelcol.receiver.vcenter][] instead.
+[otelcol.receiver.vcenter]: ../otelcol.receiver.vcenter/
+{{< /admonition >}}

## Usage

@@ -45,7 +41,7 @@ Omitted fields take their default values.
## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -88,7 +84,7 @@ prometheus.remote_write "default" { } ``` -[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.exporter.windows.md b/docs/sources/reference/components/prometheus.exporter.windows.md similarity index 96% rename from docs/sources/flow/reference/components/prometheus.exporter.windows.md rename to docs/sources/reference/components/prometheus.exporter.windows.md index 85c2948256..65c38a2286 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.windows.md +++ b/docs/sources/reference/components/prometheus.exporter.windows.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.exporter.windows/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.exporter.windows/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.exporter.windows/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.exporter.windows/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.exporter.windows/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.exporter.windows/ description: Learn about prometheus.exporter.windows title: prometheus.exporter.windows --- @@ -202,7 +197,7 @@ When `text_file_directory` is set, only files with the extension `.prom` inside ## Exported fields -{{< docs/shared lookup="flow/reference/components/exporter-component-exports.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/exporter-component-exports.md" source="alloy" version="" >}} ## Component health @@ -321,7 +316,7 @@ Replace the following: - `USERNAME`: The username to use for authentication to the remote_write API. - `PASSWORD`: The password to use for authentication to the remote_write API. 
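A minimal sketch for the Windows page above, assuming an `enabled_collectors` argument. The collector names are illustrative:

```river
prometheus.exporter.windows "example" {
  // Collectors not listed here stay disabled.
  enabled_collectors = ["cpu", "memory", "logical_disk"]
}
```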
-[scrape]: {{< relref "./prometheus.scrape.md" >}} +[scrape]: ../prometheus.scrape/ diff --git a/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md b/docs/sources/reference/components/prometheus.operator.podmonitors.md similarity index 87% rename from docs/sources/flow/reference/components/prometheus.operator.podmonitors.md rename to docs/sources/reference/components/prometheus.operator.podmonitors.md index 34d73ae784..695a58f004 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md +++ b/docs/sources/reference/components/prometheus.operator.podmonitors.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.operator.podmonitors/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.podmonitors/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.operator.podmonitors/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.podmonitors/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.podmonitors/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.operator.podmonitors/ description: Learn about prometheus.operator.podmonitors labels: stage: beta @@ -13,7 +8,7 @@ title: prometheus.operator.podmonitors # prometheus.operator.podmonitors -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `prometheus.operator.podmonitors` discovers [PodMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitor) resources in your kubernetes cluster and scrapes the targets they reference. This component performs three main functions: @@ -103,31 +98,31 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block.md" source="alloy" version="" >}} ### scrape block -{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/prom-operator-scrape.md" source="alloy" version="" >}} ### selector block @@ -191,7 +186,7 @@ fully consistent like hashmod sharding is). If {{< param "PRODUCT_ROOT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op, and `prometheus.operator.podmonitors` scrapes every target it receives in its arguments. 
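A minimal sketch of the clustered setup described above, assuming the `clustering` block takes a single `enabled` flag and that a `prometheus.remote_write` component receives the samples:

```river
prometheus.operator.podmonitors "pods" {
  forward_to = [prometheus.remote_write.default.receiver]

  // Each cluster peer scrapes only its share of the discovered
  // PodMonitor targets; outside clustered mode this block is a no-op.
  clustering {
    enabled = true
  }
}

prometheus.remote_write "default" {
  endpoint {
    url = "REMOTE_WRITE_URL"
  }
}
```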
-[using clustering]: {{< relref "../../concepts/clustering.md" >}} +[using clustering]: ../../../concepts/clustering/ ## Exported fields diff --git a/docs/sources/flow/reference/components/prometheus.operator.probes.md b/docs/sources/reference/components/prometheus.operator.probes.md similarity index 87% rename from docs/sources/flow/reference/components/prometheus.operator.probes.md rename to docs/sources/reference/components/prometheus.operator.probes.md index b51f0eef0b..7347d18379 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.probes.md +++ b/docs/sources/reference/components/prometheus.operator.probes.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.operator.probes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.probes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.operator.probes/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.probes/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.probes/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.operator.probes/ description: Learn about prometheus.operator.probes labels: stage: beta @@ -13,7 +8,7 @@ title: prometheus.operator.probes # prometheus.operator.probes -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `prometheus.operator.probes` discovers [Probe](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.Probe) resources in your Kubernetes cluster and scrapes the targets they reference. This component performs three main functions: @@ -105,31 +100,31 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block.md" source="alloy" version="" >}} ### scrape block -{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/prom-operator-scrape.md" source="alloy" version="" >}} ### selector block @@ -193,7 +188,7 @@ fully consistent like hashmod sharding is). If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op, and `prometheus.operator.probes` scrapes every target it receives in its arguments. 
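A minimal sketch for the Probe discovery described above. The `namespaces` argument and its value are illustrative assumptions:

```river
prometheus.operator.probes "example" {
  forward_to = [prometheus.remote_write.default.receiver]

  // Illustrative assumption: restrict discovery to one namespace.
  namespaces = ["monitoring"]
}

prometheus.remote_write "default" {
  endpoint {
    url = "REMOTE_WRITE_URL"
  }
}
```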
-[clustered mode]: {{< relref "../cli/run.md#clustering-beta" >}} +[clustered mode]: ../../cli/run/#clustering-beta ## Exported fields diff --git a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md b/docs/sources/reference/components/prometheus.operator.servicemonitors.md similarity index 87% rename from docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md rename to docs/sources/reference/components/prometheus.operator.servicemonitors.md index b3e89eee32..c62a7906d9 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md +++ b/docs/sources/reference/components/prometheus.operator.servicemonitors.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.operator.servicemonitors/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.operator.servicemonitors/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.operator.servicemonitors/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.servicemonitors/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.operator.servicemonitors/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.operator.servicemonitors/ description: Learn about prometheus.operator.servicemonitors labels: stage: beta @@ -13,7 +8,7 @@ title: prometheus.operator.servicemonitors # prometheus.operator.servicemonitors -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `prometheus.operator.servicemonitors` discovers [ServiceMonitor](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ServiceMonitor) resources in your kubernetes cluster and scrapes the targets they reference. This component performs three main functions: @@ -104,31 +99,31 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. 
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block.md" source="alloy" version="" >}} ### scrape block -{{< docs/shared lookup="flow/reference/components/prom-operator-scrape.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/prom-operator-scrape.md" source="alloy" version="" >}} ### selector block @@ -192,7 +187,7 @@ fully consistent like hashmod sharding is). If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op, and `prometheus.operator.servicemonitors` scrapes every target it receives in its arguments. -[using clustering]: {{< relref "../../concepts/clustering.md" >}} +[using clustering]: ../../../concepts/clustering/ ## Exported fields diff --git a/docs/sources/flow/reference/components/prometheus.receive_http.md b/docs/sources/reference/components/prometheus.receive_http.md similarity index 87% rename from docs/sources/flow/reference/components/prometheus.receive_http.md rename to docs/sources/reference/components/prometheus.receive_http.md index dd78e88ad1..3fa7e1905a 100644 --- a/docs/sources/flow/reference/components/prometheus.receive_http.md +++ b/docs/sources/reference/components/prometheus.receive_http.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.receive_http/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.receive_http/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.receive_http/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.receive_http/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.receive_http/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.receive_http/ description: Learn about prometheus.receive_http title: prometheus.receive_http --- @@ -13,9 +8,9 @@ title: prometheus.receive_http `prometheus.receive_http` listens for HTTP requests containing Prometheus metric samples and forwards them to other components capable of receiving metrics. -The HTTP API exposed is compatible with [Prometheus `remote_write` API][prometheus-remote-write-docs]. 
This means that other [`prometheus.remote_write`][prometheus.remote_write] components can be used as a client and send requests to `prometheus.receive_http` which enables using {{< param "PRODUCT_ROOT_NAME" >}} as a proxy for prometheus metrics. +The HTTP API exposed is compatible with [Prometheus `remote_write` API][prometheus-remote-write-docs]. This means that other [`prometheus.remote_write`][prometheus.remote_write] components can be used as a client and send requests to `prometheus.receive_http` which enables using {{< param "PRODUCT_ROOT_NAME" >}} as a proxy for Prometheus metrics. -[prometheus.remote_write]: {{< relref "./prometheus.remote_write.md" >}} +[prometheus.remote_write]: ../prometheus.remote_write/ [prometheus-remote-write-docs]: https://prometheus.io/docs/prometheus/2.45/querying/api/#remote-write-receiver ## Usage @@ -54,7 +49,7 @@ Hierarchy | Name | Description | Requ ### http -{{< docs/shared lookup="flow/reference/components/loki-server-http.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/loki-server-http.md" source="alloy" version="" >}} ## Exported fields @@ -86,7 +81,7 @@ This example creates a `prometheus.receive_http` component which starts an HTTP prometheus.receive_http "api" { http { listen_address = "0.0.0.0" - listen_port = 9999 + listen_port = 9999 } forward_to = [prometheus.remote_write.local.receiver] } @@ -95,7 +90,7 @@ prometheus.receive_http "api" { prometheus.remote_write "local" { endpoint { url = "http://mimir:9009/api/v1/push" - + basic_auth { username = "example-user" password = "example-password" diff --git a/docs/sources/flow/reference/components/prometheus.relabel.md b/docs/sources/reference/components/prometheus.relabel.md similarity index 92% rename from docs/sources/flow/reference/components/prometheus.relabel.md rename to docs/sources/reference/components/prometheus.relabel.md index 6ff90a88f0..9b471a50e2 100644 --- a/docs/sources/flow/reference/components/prometheus.relabel.md +++ b/docs/sources/reference/components/prometheus.relabel.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.relabel/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.relabel/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.relabel/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.relabel/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.relabel/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/ description: Learn about prometheus.relabel title: prometheus.relabel --- @@ -70,7 +65,7 @@ rule | [rule][] | Relabeling rules to apply to received metrics. 
| no ### rule block -{{< docs/shared lookup="flow/reference/components/rule-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/rule-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/prometheus.remote_write.md b/docs/sources/reference/components/prometheus.remote_write.md similarity index 71% rename from docs/sources/flow/reference/components/prometheus.remote_write.md rename to docs/sources/reference/components/prometheus.remote_write.md index 12882a498e..480e6b7120 100644 --- a/docs/sources/flow/reference/components/prometheus.remote_write.md +++ b/docs/sources/reference/components/prometheus.remote_write.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.remote_write/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.remote_write/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.remote_write/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.remote_write/ description: Learn about prometheus.remote_write title: prometheus.remote_write --- @@ -128,35 +123,35 @@ sent to `prometheus.remote_write` are forwarded to the configured endpoint. If the endpoint doesn't support receiving native histogram samples, pushing metrics fails. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### sigv4 block -{{< docs/shared lookup="flow/reference/components/sigv4-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/sigv4-block.md" source="alloy" version="" >}} ### azuread block -{{< docs/shared lookup="flow/reference/components/azuread-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/azuread-block.md" source="alloy" version="" >}} ### managed_identity block -{{< docs/shared lookup="flow/reference/components/managed_identity-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/managed_identity-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### queue_config block @@ -211,7 +206,7 @@ Name | Type | Description | Default | Required ### write_relabel_config block -{{< docs/shared 
lookup="flow/reference/components/write_relabel_config.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/write_relabel_config.md" source="alloy" version="" >}} ### wal block @@ -244,7 +239,7 @@ of data in the WAL; samples aren't removed until they are at least as old as `min_keepalive_time`, and samples are forcibly removed if they are older than `max_keepalive_time`. -[run]: {{< relref "../cli/run.md" >}} +[run]: {../../cli/run/ ## Exported fields @@ -375,7 +370,8 @@ prometheus.scrape "demo" { ### Send metrics to a Mimir instance with a tenant specified -You can create a `prometheus.remote_write` component that sends your metrics to a specific tenant within the Mimir instance. This is useful when your Mimir instance is using more than one tenant: +You can create a `prometheus.remote_write` component that sends your metrics to a specific tenant within the Mimir instance. +This is useful when your Mimir instance is using more than one tenant: ```river prometheus.remote_write "staging" { @@ -392,7 +388,8 @@ prometheus.remote_write "staging" { ### Send metrics to a managed service -You can create a `prometheus.remote_write` component that sends your metrics to a managed service, for example, Grafana Cloud. The Prometheus username and the Grafana Cloud API Key are injected in this example through environment variables. +You can create a `prometheus.remote_write` component that sends your metrics to a managed service, for example, Grafana Cloud. +The Prometheus username and the Grafana Cloud API Key are injected in this example through environment variables. ```river prometheus.remote_write "default" { @@ -407,13 +404,110 @@ prometheus.remote_write "default" { ``` ## Technical details -`prometheus.remote_write` uses [snappy](https://en.wikipedia.org/wiki/Snappy_(compression)) for compression. +`prometheus.remote_write` uses [snappy][] for compression. Any labels that start with `__` will be removed before sending to the endpoint. ## Data retention -{{< docs/shared source="agent" lookup="/wal-data-retention.md" version="" >}} +The `prometheus.remote_write` component uses a Write Ahead Log (WAL) to prevent +data loss during network outages. The component buffers the received metrics in +a WAL for each configured endpoint. The queue shards can use the WAL after the +network outage is resolved and flush the buffered metrics to the endpoints. + +The WAL records metrics in 128 MB files called segments. To avoid having a WAL +that grows on-disk indefinitely, the component _truncates_ its segments on a +set interval. + +On each truncation, the WAL deletes references to series that are no longer +present and also _checkpoints_ roughly the oldest two thirds of the segments +(rounded down to the nearest integer) written to it since the last truncation +period. A checkpoint means that the WAL only keeps track of the unique +identifier for each existing metrics series, and can no longer use the samples +for remote writing. If that data has not yet been pushed to the remote +endpoint, it is lost. + +This behavior dictates the data retention for the `prometheus.remote_write` +component. It also means that it's impossible to directly correlate data +retention directly to the data age itself, as the truncation logic works on +_segments_, not the samples themselves. This makes data retention less +predictable when the component receives a non-consistent rate of data. 
+
+The [WAL block][] in Flow mode, or the [metrics config][] in Static mode,
+contains some configurable parameters that can be used to control the tradeoff
+between memory usage, disk usage, and data retention.
+
+The `truncate_frequency` or `wal_truncate_frequency` parameter configures the
+interval at which truncations happen. A lower value leads to reduced memory
+usage, but also provides less resiliency to long outages.
+
+When a WAL clean-up starts, the most recently successfully sent timestamp is
+used to determine how much data is safe to remove from the WAL.
+The `min_keepalive_time` or `min_wal_time` controls the minimum age of samples
+considered for removal. No samples more recent than `min_keepalive_time` are
+removed. The `max_keepalive_time` or `max_wal_time` controls the maximum age of
+samples that can be kept in the WAL. Samples older than
+`max_keepalive_time` are forcibly removed.
+
+### Extended `remote_write` outages
+When the remote write endpoint is unreachable over a period of time, the most
+recent successfully sent timestamp is not updated. The
+`min_keepalive_time` and `max_keepalive_time` arguments control the age range
+of data kept in the WAL.
+
+If the remote write outage is longer than the `max_keepalive_time` parameter,
+then the WAL is truncated, and the oldest data is lost.
+
+### Intermittent `remote_write` outages
+If the remote write endpoint is intermittently reachable, the most recent
+successfully sent timestamp is updated whenever the connection is successful.
+A successful connection updates the series' comparison with
+`min_keepalive_time` and triggers a truncation on the next `truncate_frequency`
+interval, which checkpoints two thirds of the segments (rounded down to the
+nearest integer) written since the previous truncation.
+
+### Falling behind
+If the queue shards cannot flush data quickly enough to keep
+up to date with the most recent data buffered in the WAL, we say that the
+component is 'falling behind'.
+It's not unusual for the component to temporarily fall behind 2 or 3 scrape intervals.
+If the component falls behind more than one third of the data written since the
+last truncate interval, it is possible for the truncate loop to checkpoint data
+before it is pushed to the remote_write endpoint.
+
+### WAL corruption
+
+WAL corruption can occur when Grafana Agent unexpectedly stops while the latest WAL segments
+are still being written to disk. For example, the host computer has a general disk failure
+and crashes before you can stop Grafana Agent and other running services. When you restart Grafana
+Agent, it verifies the WAL, removing any corrupt segments it finds. Sometimes, this repair
+is unsuccessful, and you must manually delete the corrupted WAL to continue.
+
+If the WAL becomes corrupted, Grafana Agent writes error messages such as
+`err="failed to find segment for index"` to the log file.
+
+{{< admonition type="note" >}}
+Deleting a WAL segment or a WAL file permanently deletes the stored WAL data.
+{{< /admonition >}}
+
+To delete the corrupted WAL:
+
+1. [Stop][] Grafana Agent.
+1. Find and delete the contents of the `wal` directory.
+
+   By default the `wal` directory is a subdirectory
+   of the `data-agent` directory located in the Grafana Agent working directory. The WAL data directory
+   may be different than the default depending on the [wal_directory][] setting in your Static configuration
+   file or the path specified by the Flow [command line flag][run] `--storage-path`.
+ + {{< admonition type="note" >}} + There is one `wal` directory per: + + * Metrics instance running in Static mode + * `prometheus.remote_write` component running in Flow mode + {{< /admonition >}} + +1. [Start][Stop] {{< param "PRODUCT_NAME" >}} and verify that the WAL is working correctly. @@ -429,3 +523,10 @@ Refer to the linked documentation for more details. {{< /admonition >}} + +[snappy]: https://en.wikipedia.org/wiki/Snappy_(compression) +[WAL block]: #wal-block +[metrics config]: /docs/agent//static/configuration/metrics-config +[Stop]: /docs/agent//flow/get-started/start-agent +[wal_directory]: /docs/agent//static/configuration/metrics-config +[run]: ../../cli/run/ diff --git a/docs/sources/flow/reference/components/prometheus.scrape.md b/docs/sources/reference/components/prometheus.scrape.md similarity index 63% rename from docs/sources/flow/reference/components/prometheus.scrape.md rename to docs/sources/reference/components/prometheus.scrape.md index e329bfe4e5..864c7e7d1e 100644 --- a/docs/sources/flow/reference/components/prometheus.scrape.md +++ b/docs/sources/reference/components/prometheus.scrape.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/prometheus.scrape/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/prometheus.scrape/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/prometheus.scrape/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.scrape/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/prometheus.scrape/ description: Learn about prometheus.scrape title: prometheus.scrape --- @@ -42,36 +37,36 @@ time), the component reports an error. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`targets` | `list(map(string))` | List of targets to scrape. | | yes -`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes -`job_name` | `string` | The value to use for the job label if not already set. | component name | no -`extra_metrics` | `bool` | Whether extra metrics should be generated for scrape targets. | `false` | no -`enable_protobuf_negotiation` | `bool` | Whether to enable protobuf negotiation with the client. | `false` | no -`honor_labels` | `bool` | Indicator whether the scraped metrics should remain unmodified. | `false` | no -`honor_timestamps` | `bool` | Indicator whether the scraped timestamps should be respected. | `true` | no -`track_timestamps_staleness` | `bool` | Indicator whether to track the staleness of the scraped timestamps. | `false` | no -`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no -`scrape_classic_histograms` | `bool` | Whether to scrape a classic histogram that is also exposed as a native histogram. | `false` | no -`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"60s"` | no -`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. | `"10s"` | no -`metrics_path` | `string` | The HTTP resource path on which to fetch metrics from targets. | `/metrics` | no -`scheme` | `string` | The URL scheme with which to fetch metrics from targets.
| | no -`body_size_limit` | `int` | An uncompressed response body larger than this many bytes causes the scrape to fail. 0 means no limit. | | no -`sample_limit` | `uint` | More than this many samples post metric-relabeling causes the scrape to fail | | no -`target_limit` | `uint` | More than this many targets after the target relabeling causes the scrapes to fail. | | no -`label_limit` | `uint` | More than this many labels post metric-relabeling causes the scrape to fail. | | no -`label_name_length_limit` | `uint` | More than this label name length post metric-relabeling causes the scrape to fail. | | no -`label_value_length_limit` | `uint` | More than this label value length post metric-relabeling causes the scrape to fail. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`proxy_url` | `string` | HTTP proxy to send requests through. | | no -`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no -`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no -`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no +Name | Type | Description | Default | Required +------------------------------|-------------------------|--------------------------------------------------------------------------------------------------------|----------------|--------- +`targets` | `list(map(string))` | List of targets to scrape. | | yes +`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes +`job_name` | `string` | The value to use for the job label if not already set. | component name | no +`extra_metrics` | `bool` | Whether extra metrics should be generated for scrape targets. | `false` | no +`enable_protobuf_negotiation` | `bool` | Whether to enable protobuf negotiation with the client. | `false` | no +`honor_labels` | `bool` | Indicator whether the scraped metrics should remain unmodified. | `false` | no +`honor_timestamps` | `bool` | Indicator whether the scraped timestamps should be respected. | `true` | no +`track_timestamps_staleness` | `bool` | Indicator whether to track the staleness of the scraped timestamps. | `false` | no +`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no +`scrape_classic_histograms` | `bool` | Whether to scrape a classic histogram that is also exposed as a native histogram. | `false` | no +`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"60s"` | no +`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. | `"10s"` | no +`metrics_path` | `string` | The HTTP resource path on which to fetch metrics from targets. | `/metrics` | no +`scheme` | `string` | The URL scheme with which to fetch metrics from targets. | | no +`body_size_limit` | `int` | An uncompressed response body larger than this many bytes causes the scrape to fail. 0 means no limit. 
| | no +`sample_limit` | `uint` | More than this many samples post metric-relabeling causes the scrape to fail. | | no +`target_limit` | `uint` | More than this many targets after the target relabeling causes the scrapes to fail. | | no +`label_limit` | `uint` | More than this many labels post metric-relabeling causes the scrape to fail. | | no +`label_name_length_limit` | `uint` | A label name longer than this length post metric-relabeling causes the scrape to fail. | | no +`label_value_length_limit` | `uint` | A label value longer than this length post metric-relabeling causes the scrape to fail. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`proxy_url` | `string` | HTTP proxy to send requests through. | | no +`no_proxy` | `string` | Comma-separated list of IP addresses, CIDR notations, and domain names to exclude from proxying. | | no +`proxy_from_environment` | `bool` | Use the proxy URL indicated by environment variables. | `false` | no +`proxy_connect_header` | `map(list(secret))` | Specifies headers to send to proxies during CONNECT requests. | | no At most, one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -80,7 +75,7 @@ Name | Type | Description | Default | Required - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} `track_timestamps_staleness` controls whether Prometheus tracks [staleness][prom-staleness] of metrics with an explicit timestamp present in scraped data. * An "explicit timestamp" is an optional timestamp in the [Prometheus metrics exposition format][prom-text-exposition-format]. For example, this sample has a timestamp of `1395066363000`: @@ -100,14 +95,14 @@ Name | Type | Description | Default | Required The following blocks are supported inside the definition of `prometheus.scrape`: -Hierarchy | Block | Description | Required --------- | ----- | ----------- | -------- -basic_auth | [basic_auth][] | Configure basic_auth for authenticating to targets. | no -authorization | [authorization][] | Configure generic authorization to targets. | no -oauth2 | [oauth2][] | Configure OAuth2 for authenticating to targets. | no -oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to targets via OAuth2. | no -tls_config | [tls_config][] | Configure TLS settings for connecting to targets. | no -clustering | [clustering][] | Configure the component for when the Agent is running in clustered mode. | no +Hierarchy | Block | Description | Required +--------------------|-------------------|--------------------------------------------------------------------------|--------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to targets. | no +authorization | [authorization][] | Configure generic authorization to targets. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to targets. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to targets via OAuth2.
| no +tls_config | [tls_config][] | Configure TLS settings for connecting to targets. | no +clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_NAME" >}} is running in clustered mode. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. @@ -122,19 +117,19 @@ an `oauth2` block. ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### clustering (beta) @@ -165,7 +160,7 @@ fully consistent like hashmod sharding is). If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, then the block is a no-op and `prometheus.scrape` scrapes every target it receives in its arguments. -[using clustering]: {{< relref "../../concepts/clustering.md" >}} +[using clustering]: ../../../concepts/clustering/ ## Exported fields @@ -259,8 +254,8 @@ To enable scraping of Prometheus' native histograms over gRPC, the scrape the 'classic' histogram equivalent of a native histogram, if it is present. -[in-memory traffic]: {{< relref "../../concepts/component_controller.md#in-memory-traffic" >}} -[run command]: {{< relref "../cli/run.md" >}} +[in-memory traffic]: ../../../concepts/component_controller/#in-memory-traffic +[run command]: ../../cli/run/ ## Example diff --git a/docs/sources/flow/reference/components/pyroscope.ebpf.md b/docs/sources/reference/components/pyroscope.ebpf.md similarity index 95% rename from docs/sources/flow/reference/components/pyroscope.ebpf.md rename to docs/sources/reference/components/pyroscope.ebpf.md index dd13550576..2a793e1140 100644 --- a/docs/sources/flow/reference/components/pyroscope.ebpf.md +++ b/docs/sources/reference/components/pyroscope.ebpf.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/pyroscope.ebpf/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.ebpf/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.ebpf/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.ebpf/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.ebpf/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/pyroscope.ebpf/ description: Learn about pyroscope.ebpf labels: stage: beta @@ -13,7 +8,7 @@ title: pyroscope.ebpf # pyroscope.ebpf -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `pyroscope.ebpf` configures an eBPF profiling job for the current host. The collected performance profiles are forwarded to the list of receivers passed in `forward_to`.
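
As a minimal, hedged sketch: on Kubernetes, targets typically come from a discovery component, and the `pyroscope.write` component labeled `backend` is assumed to be defined elsewhere.

```river
// Discover pods running on the local node (assumed setup).
discovery.kubernetes "pods" {
  role = "pod"
}

// Profile the discovered workloads and forward the profiles.
pyroscope.ebpf "host" {
  targets    = discovery.kubernetes.pods.targets
  forward_to = [pyroscope.write.backend.receiver]
}
```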
diff --git a/docs/sources/flow/reference/components/pyroscope.java.md b/docs/sources/reference/components/pyroscope.java.md similarity index 92% rename from docs/sources/flow/reference/components/pyroscope.java.md rename to docs/sources/reference/components/pyroscope.java.md index 3fdc810529..38eade4c77 100644 --- a/docs/sources/flow/reference/components/pyroscope.java.md +++ b/docs/sources/reference/components/pyroscope.java.md @@ -1,17 +1,12 @@ --- -aliases: - - /docs/grafana-cloud/agent/flow/reference/components/pyroscope.java/ - - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.java/ - - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.java/ - - /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.java/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.java/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/pyroscope.java/ description: Learn about pyroscope.java title: pyroscope.java --- # pyroscope.java -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `pyroscope.java` continuously profiles Java processes running on the local Linux OS using [async-profiler](https://github.com/async-profiler/async-profiler). diff --git a/docs/sources/flow/reference/components/pyroscope.scrape.md b/docs/sources/reference/components/pyroscope.scrape.md similarity index 95% rename from docs/sources/flow/reference/components/pyroscope.scrape.md rename to docs/sources/reference/components/pyroscope.scrape.md index 813035c8e2..094ec77e26 100644 --- a/docs/sources/flow/reference/components/pyroscope.scrape.md +++ b/docs/sources/reference/components/pyroscope.scrape.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/pyroscope.scrape/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.scrape/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.scrape/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.scrape/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.scrape/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/pyroscope.scrape/ description: Learn about pyroscope.scrape labels: stage: beta @@ -13,7 +8,7 @@ title: pyroscope.scrape # pyroscope.scrape -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `pyroscope.scrape` collects [pprof] performance profiles for a given set of HTTP `targets`. @@ -42,7 +37,7 @@ The scraped performance profiles can be forwarded to components such as Multiple `pyroscope.scrape` components can be specified by giving them different labels. -[debug UI]: {{< relref "../../tasks/debug.md" >}} +[debug UI]: ../../../tasks/debug/ ## Usage @@ -95,7 +90,7 @@ Name | Type | Description [arguments]: #arguments -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} #### `job_name` argument @@ -219,19 +214,19 @@ the defaults documented in [profile.mutex][] will be used. 
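For instance, a hedged sketch of a scrape job that enables mutex profiling in addition to the defaults; the target address, the `service_name` value, and the `pyroscope.write` component labeled `backend` are placeholders:

```river
pyroscope.scrape "local" {
  targets    = [{"__address__" = "localhost:4100", "service_name" = "my-app"}]
  forward_to = [pyroscope.write.backend.receiver]

  profiling_config {
    // Mutex profiles are not collected by default; opt in explicitly.
    profile.mutex {
      enabled = true
    }
  }
}
```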
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ### profiling_config block @@ -410,7 +405,7 @@ APIs. If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, this block is a no-op. -[using clustering]: {{< relref "../../concepts/clustering.md" >}} +[using clustering]: ../../../concepts/clustering/ ## Common configuration diff --git a/docs/sources/flow/reference/components/pyroscope.write.md b/docs/sources/reference/components/pyroscope.write.md similarity index 84% rename from docs/sources/flow/reference/components/pyroscope.write.md rename to docs/sources/reference/components/pyroscope.write.md index 403aef0719..d08735e216 100644 --- a/docs/sources/flow/reference/components/pyroscope.write.md +++ b/docs/sources/reference/components/pyroscope.write.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/pyroscope.write/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/pyroscope.write/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/pyroscope.write/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/pyroscope.write/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.write/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/pyroscope.write/ description: Learn about pyroscope.write labels: stage: beta @@ -13,7 +8,7 @@ title: pyroscope.write # pyroscope.write -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `pyroscope.write` receives performance profiles from other components and forwards them to a series of user-supplied endpoints using [Pyroscope's Push API](/oss/pyroscope/). @@ -99,26 +94,26 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} When multiple `endpoint` blocks are provided, profiles are concurrently forwarded to all configured locations.
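For example, a minimal sketch that fans profiles out to two backends concurrently (both URLs are placeholders):

```river
pyroscope.write "backends" {
  endpoint {
    url = "http://pyroscope-a:4040" // placeholder
  }

  endpoint {
    url = "http://pyroscope-b:4040" // placeholder
  }
}
```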
### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/remote.http.md b/docs/sources/reference/components/remote.http.md similarity index 54% rename from docs/sources/flow/reference/components/remote.http.md rename to docs/sources/reference/components/remote.http.md index e91fc6c409..a959739954 100644 --- a/docs/sources/flow/reference/components/remote.http.md +++ b/docs/sources/reference/components/remote.http.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/remote.http/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.http/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/remote.http/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.http/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.http/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/remote.http/ description: Learn about remote.http title: remote.http --- @@ -32,15 +27,15 @@ remote.http "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`url` | `string` | URL to poll. | | yes -`method` | `string` | Define HTTP method for the request | `"GET"` | no -`headers` | `map(string)` | Custom headers for the request. | `{}` | no -`body` | `string` | The request body. | `""` | no -`poll_frequency` | `duration` | Frequency to poll the URL. | `"1m"` | no -`poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no -`is_secret` | `bool` | Whether the response body should be treated as a secret. | false | no +Name | Type | Description | Default | Required +-----------------|---------------|----------------------------------------------------------|---------|--------- +`url` | `string` | URL to poll. | | yes +`method` | `string` | The HTTP method to use for the request. | `"GET"` | no +`headers` | `map(string)` | Custom headers for the request. | `{}` | no +`body` | `string` | The request body. | `""` | no +`poll_frequency` | `duration` | Frequency to poll the URL. | `"1m"` | no +`poll_timeout` | `duration` | Timeout when polling the URL. | `"10s"` | no +`is_secret` | `bool` | Whether the response body should be treated as a secret. | `false` | no When `remote.http` performs a poll operation, an HTTP `GET` request is made against the URL specified by the `url` argument. A poll is triggered by the @@ -54,20 +49,20 @@ The poll is successful if the URL returns a `200 OK` response code. All other response codes are treated as errors and mark the component as unhealthy.
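As a sketch of the polling behavior described above, the following polls a placeholder URL every two minutes and fails the poll if no response arrives within 15 seconds:

```river
remote.http "app_config" {
  url            = "https://example.com/config.json" // placeholder
  poll_frequency = "2m"
  poll_timeout   = "15s"
}
```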
After a successful poll, the response body from the URL is exported. -[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} +[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets ## Blocks The following blocks are supported inside the definition of `remote.http`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client | [client][] | HTTP client settings when connecting to the endpoint. | no -client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no -client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no -client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no -client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no -client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +Hierarchy | Block | Description | Required +-----------------------------|-------------------|----------------------------------------------------------|--------- +client | [client][] | HTTP client settings when connecting to the endpoint. | no +client > basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +client > authorization | [authorization][] | Configure generic authorization to the endpoint. | no +client > oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +client > oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +client > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `client > basic_auth` refers to a `basic_auth` block defined inside a `client` block. @@ -83,34 +78,34 @@ basic_auth` refers to an `basic_auth` block defined inside a `client` block. ### client block The `client` block configures settings used to connect to the HTTP server. -{{< docs/shared lookup="flow/reference/components/http-client-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-config-block.md" source="alloy" version="" >}} ### basic_auth block The `basic_auth` block configures basic authentication to use when polling the configured URL. -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block The `authorization` block configures custom authorization to use when polling the configured URL. -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block The `oauth2` block configures OAuth2 authorization to use when polling the configured URL. -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block The `tls_config` block configures TLS settings for connecting to HTTPS servers.
-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/remote.kubernetes.configmap.md b/docs/sources/reference/components/remote.kubernetes.configmap.md similarity index 87% rename from docs/sources/flow/reference/components/remote.kubernetes.configmap.md rename to docs/sources/reference/components/remote.kubernetes.configmap.md index adbaf214d2..d4c37cbc05 100644 --- a/docs/sources/flow/reference/components/remote.kubernetes.configmap.md +++ b/docs/sources/reference/components/remote.kubernetes.configmap.md @@ -1,8 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.kubernetes.configmap/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.kubernetes.configmap/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.kubernetes.configmap/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/remote.kubernetes.configmap/ description: Learn about remote.kubernetes.configmap title: remote.kubernetes.configmap --- @@ -93,23 +90,23 @@ Name | Type | Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields diff --git a/docs/sources/flow/reference/components/remote.kubernetes.secret.md b/docs/sources/reference/components/remote.kubernetes.secret.md similarity index 87% rename from docs/sources/flow/reference/components/remote.kubernetes.secret.md rename to docs/sources/reference/components/remote.kubernetes.secret.md index 8e5a7cd966..00ae508a95 100644 --- a/docs/sources/flow/reference/components/remote.kubernetes.secret.md +++ b/docs/sources/reference/components/remote.kubernetes.secret.md @@ -1,8 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.kubernetes.secret/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.kubernetes.secret/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.kubernetes.secret/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/remote.kubernetes.secret/ description: Learn about remote.kubernetes.secret title: remote.kubernetes.secret --- @@ -92,23 +89,23 @@ Name | Type | 
Description - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### authorization block -{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}} ### oauth2 block -{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}} ### tls_config block -{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}} ## Exported fields @@ -131,7 +128,7 @@ nonsensitive(remote.kubernetes.secret.LABEL.data.KEY_NAME) Using `nonsensitive` allows you to use the exports of `remote.kubernetes.secret` in attributes of components that don't support secrets. -[nonsensitive]: {{< relref "../stdlib/nonsensitive.md" >}} +[nonsensitive]: ../../stdlib/nonsensitive/ ## Component health diff --git a/docs/sources/flow/reference/components/remote.s3.md b/docs/sources/reference/components/remote.s3.md similarity index 87% rename from docs/sources/flow/reference/components/remote.s3.md rename to docs/sources/reference/components/remote.s3.md index c4ec8e195e..a0ad69a767 100644 --- a/docs/sources/flow/reference/components/remote.s3.md +++ b/docs/sources/reference/components/remote.s3.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/reference/components/remote.s3/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.s3/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/remote.s3/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.s3/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.s3/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/remote.s3/ description: Learn about remote.s3 title: remote.s3 --- @@ -44,7 +39,7 @@ Name | Type | Description | Default | Required > **NOTE**: `path` must include a full path to a file. Reading directories isn't supported.
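For example, a hedged sketch that reads a single object (the bucket and key are placeholders):

```river
remote.s3 "app_config" {
  // `path` must point at a single file, not a directory.
  path = "s3://my-bucket/config/app.yaml"
}
```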
-[secret]: {{< relref "../../concepts/config-language/expressions/types_and_values.md#secrets" >}} +[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets ## Blocks diff --git a/docs/sources/flow/reference/components/remote.vault.md b/docs/sources/reference/components/remote.vault.md similarity index 82% rename from docs/sources/flow/reference/components/remote.vault.md rename to docs/sources/reference/components/remote.vault.md index a4491bd25c..d8c2516cb1 100644 --- a/docs/sources/flow/reference/components/remote.vault.md +++ b/docs/sources/reference/components/remote.vault.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/latest/flow/reference/components/remote.vault/ -- /docs/grafana-cloud/agent/flow/reference/components/remote.vault/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/remote.vault/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/remote.vault/ -- /docs/grafana-cloud/send-data/agent/flow/reference/components/remote.vault/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/components/remote.vault/ +canonical: https://grafana.com/docs/alloy/latest/reference/components/remote.vault/ description: Learn about remote.vault title: remote.vault --- @@ -39,12 +33,12 @@ remote.vault "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`server` | `string` | The Vault server to connect to. | | yes -`namespace` | `string` | The Vault namespace to connect to (Vault Enterprise only). | | no -`path` | `string` | The path to retrieve a secret from. | | yes -`reread_frequency` | `duration` | Rate to re-read keys. | `"0s"` | no +Name | Type | Description | Default | Required +-------------------|------------|------------------------------------------------------------|---------|--------- +`server` | `string` | The Vault server to connect to. | | yes +`namespace` | `string` | The Vault namespace to connect to (Vault Enterprise only). | | no +`path` | `string` | The path to retrieve a secret from. | | yes +`reread_frequency` | `duration` | Rate to re-read keys. | `"0s"` | no Tokens with a lease will be automatically renewed roughly two-thirds through their lease duration. If the leased token isn't renewable, or renewing the @@ -58,18 +52,18 @@ at a frequency specified by the `reread_frequency` argument. Setting The following blocks are supported inside the definition of `remote.vault`: -Hierarchy | Block | Description | Required ---------- | ----- | ----------- | -------- -client_options | [client_options][] | Options for the Vault client. | no -auth.token | [auth.token][] | Authenticate to Vault with a token. | no -auth.approle | [auth.approle][] | Authenticate to Vault using AppRole. | no -auth.aws | [auth.aws][] | Authenticate to Vault using AWS. | no -auth.azure | [auth.azure][] | Authenticate to Vault using Azure. | no -auth.gcp | [auth.gcp][] | Authenticate to Vault using GCP. | no -auth.kubernetes | [auth.kubernetes][] | Authenticate to Vault using Kubernetes. | no -auth.ldap | [auth.ldap][] | Authenticate to Vault using LDAP. | no -auth.userpass | [auth.userpass][] | Authenticate to Vault using a username and password. | no -auth.custom | [auth.custom][] | Authenticate to Vault with custom authentication. 
| no +Hierarchy | Block | Description | Required +----------------|---------------------|------------------------------------------------------|--------- +client_options | [client_options][] | Options for the Vault client. | no +auth.token | [auth.token][] | Authenticate to Vault with a token. | no +auth.approle | [auth.approle][] | Authenticate to Vault using AppRole. | no +auth.aws | [auth.aws][] | Authenticate to Vault using AWS. | no +auth.azure | [auth.azure][] | Authenticate to Vault using Azure. | no +auth.gcp | [auth.gcp][] | Authenticate to Vault using GCP. | no +auth.kubernetes | [auth.kubernetes][] | Authenticate to Vault using Kubernetes. | no +auth.ldap | [auth.ldap][] | Authenticate to Vault using LDAP. | no +auth.userpass | [auth.userpass][] | Authenticate to Vault using a username and password. | no +auth.custom | [auth.custom][] | Authenticate to Vault with custom authentication. | no Exactly one `auth.*` block **must** be provided; otherwise, the component fails to load. @@ -89,12 +83,12 @@ fail to load. The `client_options` block customizes the connection to Vault. -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- +Name | Type | Description | Default | Required +-----------------|------------|-------------------------------------------------------|------------|--------- `min_retry_wait` | `duration` | Minimum time to wait before retrying failed requests. | `"1000ms"` | no `max_retry_wait` | `duration` | Maximum time to wait before retrying failed requests. | `"1500ms"` | no -`max_retries` | `int` | Maximum number of times to retry after a 5xx error. | `2` | no -`timeout` | `duration` | Maximum time to wait before a request times out. | `"60s"` | no +`max_retries` | `int` | Maximum number of times to retry after a 5xx error. | `2` | no +`timeout` | `duration` | Maximum time to wait before a request times out. | `"60s"` | no Requests which fail due to server errors (HTTP 5xx error codes) can be retried. The `max_retries` argument specifies how many times to retry failed requests. @@ -284,7 +278,7 @@ nonsensitive(remote.vault.LABEL.data.KEY_NAME) Using `nonsensitive` allows you to use the exports of `remote.vault` in attributes of components that don't support secrets. -[nonsensitive]: {{< relref "../stdlib/nonsensitive.md" >}} +[nonsensitive]: ../../stdlib/nonsensitive/ ## Component health diff --git a/docs/sources/reference/config-blocks/_index.md b/docs/sources/reference/config-blocks/_index.md new file mode 100644 index 0000000000..e3ac635534 --- /dev/null +++ b/docs/sources/reference/config-blocks/_index.md @@ -0,0 +1,17 @@ +--- +aliases: +- ./reference/config-blocks/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/ +description: Learn about configuration blocks +title: Configuration blocks +weight: 200 +--- + +# Configuration blocks + +Configuration blocks are optional top-level blocks that can be used to configure various parts of the {{< param "PRODUCT_NAME" >}} process. +Each configuration block can only be defined once. + +Configuration blocks are _not_ components, so they have no exports.
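+
+For example, a minimal top-level `logging` configuration block; this is only a sketch, and the available arguments are documented in the `logging` block reference:
+
+```river
+logging {
+  level  = "debug"
+  format = "json"
+}
+```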
+ +{{< section >}} diff --git a/docs/sources/flow/reference/config-blocks/argument.md b/docs/sources/reference/config-blocks/argument.md similarity index 71% rename from docs/sources/flow/reference/config-blocks/argument.md rename to docs/sources/reference/config-blocks/argument.md index 758ec1d5ee..ff265c9e31 100644 --- a/docs/sources/flow/reference/config-blocks/argument.md +++ b/docs/sources/reference/config-blocks/argument.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/argument/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/argument/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/argument/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/argument/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/argument/ +- ./reference/config-blocks/argument/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/argument/ description: Learn about the argument configuration block menuTitle: argument title: argument block @@ -21,7 +18,7 @@ The `argument` block may only be specified inside the definition of [a `declare` block][declare]. In [classic modules][], the `argument` block is valid as a top-level block in a classic module. Classic modules are deprecated and scheduled to be removed in the release after v0.40. -[classic modules]: https://grafana.com/docs/agent//flow/concepts/modules/#classic-modules-deprecated +[classic modules]: ../../../concepts/modules/#classic-modules-deprecated {{< /admonition >}} ## Example @@ -81,9 +78,5 @@ declare "self_collect" { } ``` -{{% docs/reference %}} -[custom component]: "/docs/agent/ -> /docs/agent//flow/concepts/custom_components" -[custom component]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/custom_components" -[declare]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/declare" -[declare]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/declare" -{{% /docs/reference %}} +[custom component]: ../../../concepts/custom_components/ +[declare]: ../declare/ diff --git a/docs/sources/flow/reference/config-blocks/declare.md b/docs/sources/reference/config-blocks/declare.md similarity index 56% rename from docs/sources/flow/reference/config-blocks/declare.md rename to docs/sources/reference/config-blocks/declare.md index f4f6f455a4..d8f8179eee 100644 --- a/docs/sources/flow/reference/config-blocks/declare.md +++ b/docs/sources/reference/config-blocks/declare.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/declare/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/declare/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/declare/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/declare/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/declare/ +- ./reference/config-blocks/declare/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/declare/ description: Learn about the declare configuration block menuTitle: declare title: declare block @@ -73,13 +70,8 @@ prometheus.remote_write "example" { } ``` -{{% docs/reference %}} -[argument]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/argument" -[argument]:"/docs/grafana-cloud/ ->
/docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/argument" -[export]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/export" -[export]:"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/export" -[declare]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/declare" -[declare]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/declare" -[import]: "/docs/agent/ -> /docs/agent//flow/concepts/modules#importing-modules" -[import]:"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/modules#importing-modules" -{{% /docs/reference %}} +[argument]: ../argument/ +[export]: ../export/ +[declare]: ../declare/ +[import]: ../../../concepts/modules/#importing-modules +[custom component]: ../../../concepts/custom_components/ diff --git a/docs/sources/flow/reference/config-blocks/export.md b/docs/sources/reference/config-blocks/export.md similarity index 57% rename from docs/sources/flow/reference/config-blocks/export.md rename to docs/sources/reference/config-blocks/export.md index 0b119e4b63..4b28a6497d 100644 --- a/docs/sources/flow/reference/config-blocks/export.md +++ b/docs/sources/reference/config-blocks/export.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/export/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/export/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/export/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/export/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/export/ +- ./reference/config-blocks/export/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/export/ description: Learn about the export configuration block menuTitle: export title: export block @@ -18,9 +15,10 @@ title: export block The `export` block may only be specified inside the definition of [a `declare` block][declare]. {{< admonition type="note" >}} -In [classic modules][], the `export` block is valid as a top-level block in a classic module. Classic modules are deprecated and scheduled to be removed in the release after v0.40. +In [classic modules][], the `export` block is valid as a top-level block in a classic module. +Classic modules are deprecated and scheduled to be removed in the release after v0.40. 
-[classic modules]: https://grafana.com/docs/agent//flow/concepts/modules/#classic-modules-deprecated +[classic modules]: ../../../concepts/modules/#classic-modules-deprecated {{< /admonition >}} ## Example @@ -69,9 +67,5 @@ declare "pods_and_nodes" { } ``` -{{% docs/reference %}} -[custom component]: "/docs/agent/ -> /docs/agent//flow/concepts/custom_components" -[custom component]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/custom_components" -[declare]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/declare" -[declare]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/declare" -{{% /docs/reference %}} +[custom component]: ../../../concepts/custom_components/ +[declare]: ../declare/ diff --git a/docs/sources/flow/reference/config-blocks/http.md b/docs/sources/reference/config-blocks/http.md similarity index 94% rename from docs/sources/flow/reference/config-blocks/http.md rename to docs/sources/reference/config-blocks/http.md index 03a52010a8..c0718a760f 100644 --- a/docs/sources/flow/reference/config-blocks/http.md +++ b/docs/sources/reference/config-blocks/http.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/http/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/http/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/http/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/http/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/http/ +- ./reference/config-blocks/http/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/http/ description: Learn about the http configuration block menuTitle: http title: http block @@ -41,11 +38,6 @@ tls > windows_certificate_filter | [windows_certificate_filter][] | Con tls > windows_certificate_filter > client | [client][] | Configure client certificates for Windows certificate filter. | no tls > windows_certificate_filter > server | [server][] | Configure server certificates for Windows certificate filter. | no -[tls]: #tls-block -[windows_certificate_filter]: #windows-certificate-filter-block -[server]: #server-block -[client]: #client-block - ### tls block The `tls` block configures TLS settings for the HTTP server. @@ -71,9 +63,7 @@ Name | Type | Description `min_version` | `string` | Oldest TLS version to accept from clients. | `""` | no `max_version` | `string` | Newest TLS version to accept from clients. | `""` | no -When the `tls` block is specified, arguments for the TLS certificate (using -`cert_pem` or `cert_file`) and for the TLS key (using `key_pem` or `key_file`) -are required. +When the `tls` block is specified, arguments for the TLS certificate (using `cert_pem` or `cert_file`) and for the TLS key (using `key_pem` or `key_file`) are required. The following pairs of arguments are mutually exclusive, and only one may be configured at a time: @@ -120,9 +110,9 @@ The set of cipher suites specified may be from the following: | `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256` | no | | `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` | no | -The `curve_preferences` argument determines the set of elliptic curves to -prefer during a handshake in preference order. If not provided, a default list -is used. 
The set of elliptic curves specified may be from the following: +The `curve_preferences` argument determines the set of elliptic curves to prefer during a handshake in preference order. +If not provided, a default list is used. +The set of elliptic curves specified may be from the following: | Curve | Allowed in `boringcrypto` builds | | ----------- | -------------------------------- | @@ -186,3 +176,8 @@ Name | Type | Description `issuer_common_names` | `list(string)` | Issuer common names to check against. | | no `subject_regex` | `string` | Regular expression to match Subject name. | `""` | no `template_id` | `string` | Client Template ID to match in ASN1 format, for example, "1.2.3". | `""` | no + +[tls]: #tls-block +[windows_certificate_filter]: #windows-certificate-filter-block +[server]: #server-block +[client]: #client-block diff --git a/docs/sources/flow/reference/config-blocks/import.file.md b/docs/sources/reference/config-blocks/import.file.md similarity index 63% rename from docs/sources/flow/reference/config-blocks/import.file.md rename to docs/sources/reference/config-blocks/import.file.md index 8958c00c5e..60a6cee183 100644 --- a/docs/sources/flow/reference/config-blocks/import.file.md +++ b/docs/sources/reference/config-blocks/import.file.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/import.file/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/import.file/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/import.file/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/import.file/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/import.file/ +- ./reference/config-blocks/import.file/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.file/ description: Learn about the import.file configuration block labels: stage: beta @@ -13,7 +10,7 @@ title: import.file # import.file -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} The `import.file` block imports custom components from a file and exposes them to the importer. `import.file` blocks must be given a label that determines the namespace where custom components are exposed. @@ -36,7 +33,7 @@ Name | Type | Description `detector` | `string` | Which file change detector to use (fsnotify, poll). | `"fsnotify"` | no `poll_frequency` | `duration` | How often to poll for file changes. 
| `"1m"` | no -{{< docs/shared lookup="flow/reference/components/local-file-arguments-text.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/local-file-arguments-text.md" source="alloy" version="" >}} ## Example @@ -67,8 +64,3 @@ math.add "default" { } ``` {{< /collapse >}} - -{{% docs/reference %}} -[module]: "/docs/agent/ -> /docs/agent//flow/concepts/modules" -[module]:"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/modules" -{{% /docs/reference %}} diff --git a/docs/sources/flow/reference/config-blocks/import.git.md b/docs/sources/reference/config-blocks/import.git.md similarity index 71% rename from docs/sources/flow/reference/config-blocks/import.git.md rename to docs/sources/reference/config-blocks/import.git.md index b08852ff29..f7b7f724b1 100644 --- a/docs/sources/flow/reference/config-blocks/import.git.md +++ b/docs/sources/reference/config-blocks/import.git.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/import.git/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/import.git/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/import.git/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/import.git/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/import.git/ +- ./reference/config-blocks/import.git/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.git/ description: Learn about the import.git configuration block labels: stage: beta @@ -13,7 +10,7 @@ title: import.git # import.git -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} The `import.git` block imports custom components from a Git repository and exposes them to the importer. `import.git` blocks must be given a label that determines the namespace where custom components are exposed. @@ -38,18 +35,13 @@ Name | Type | Description `path` | `string` | The path in the repository where the module is stored. | | yes `pull_frequency` | `duration` | The frequency to pull the repository for updates. | `"60s"` | no -The `repository` attribute must be set to a repository address that would be -recognized by Git with a `git clone REPOSITORY_ADDRESS` command, such as -`https://github.com/grafana/agent.git`. +The `repository` attribute must be set to a repository address that would be recognized by Git with a `git clone REPOSITORY_ADDRESS` command, such as `https://github.com/grafana/alloy.git`. -You must set the `repository` attribute to a repository address that Git would recognize -with a `git clone REPOSITORY_ADDRESS` command, such as `https://github.com/grafana/agent.git`. +You must set the `repository` attribute to a repository address that Git would recognize with a `git clone REPOSITORY_ADDRESS` command, such as `https://github.com/grafana/alloy.git`. -When provided, the `revision` attribute must be set to a valid branch, tag, or -commit SHA within the repository. +When provided, the `revision` attribute must be set to a valid branch, tag, or commit SHA within the repository. -You must set the `path` attribute to a path accessible from the repository's root, -such as `FILE_NAME.river` or `FOLDER_NAME/FILE_NAME.river`. +You must set the `path` attribute to a path accessible from the repository's root, such as `FILE_NAME.river` or `FOLDER_NAME/FILE_NAME.river`. 
If `pull_frequency` isn't `"0s"`, the Git repository is pulled for updates at the frequency specified. If it's set to `"0s"`, the Git repository is pulled once on init. @@ -69,7 +61,7 @@ ssh_key | [ssh_key][] | Configure an SSH Key for authenticating to the rep ### basic_auth block -{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}} +{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}} ### ssh_key block @@ -99,8 +91,3 @@ math.add "default" { [basic_auth]: #basic_auth-block [ssh_key]: #ssh_key-block - -{{% docs/reference %}} -[module]: "/docs/agent/ -> /docs/agent//flow/concepts/modules" -[module]:"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/modules" -{{% /docs/reference %}} diff --git a/docs/sources/flow/reference/config-blocks/import.http.md b/docs/sources/reference/config-blocks/import.http.md similarity index 72% rename from docs/sources/flow/reference/config-blocks/import.http.md rename to docs/sources/reference/config-blocks/import.http.md index c04ae1711c..c788166f81 100644 --- a/docs/sources/flow/reference/config-blocks/import.http.md +++ b/docs/sources/reference/config-blocks/import.http.md @@ -1,10 +1,7 @@ --- aliases: -- /docs/grafana-cloud/agent/flow/reference/config-blocks/import.http/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/import.http/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/import.http/ -- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/import.http/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/import.http/ +- ./reference/config-blocks/import.http/ +canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.http/ description: Learn about the import.http configuration block labels: stage: beta @@ -13,7 +10,7 @@ title: import.http # import.http -{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} +{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} `import.http` retrieves a module from an HTTP server. 
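For example, a minimal sketch (the URL is a placeholder):

```river
import.http "math" {
  url = "https://example.com/modules/math.river"
}
```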
diff --git a/docs/sources/flow/reference/config-blocks/import.string.md b/docs/sources/reference/config-blocks/import.string.md
similarity index 65%
rename from docs/sources/flow/reference/config-blocks/import.string.md
rename to docs/sources/reference/config-blocks/import.string.md
index b5ee71c4c6..8259a11b3c 100644
--- a/docs/sources/flow/reference/config-blocks/import.string.md
+++ b/docs/sources/reference/config-blocks/import.string.md
@@ -1,10 +1,7 @@
---
aliases:
-- /docs/grafana-cloud/agent/flow/reference/config-blocks/import.string/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/import.string/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/import.string/
-- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/import.string/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/import.string/
+- ./reference/config-blocks/import.string/
+canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.string/
description: Learn about the import.string configuration block
labels:
  stage: beta
@@ -13,7 +10,7 @@ title: import.string

# import.string

-{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}}
+{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}}

The `import.string` block imports custom components from a string and exposes them to the importer.
`import.string` blocks must be given a label that determines the namespace where custom components are exposed.
@@ -59,8 +56,3 @@ math.add "default" {
  b = 45
}
```
-
-{{% docs/reference %}}
-[module]: "/docs/agent/ -> /docs/agent//flow/concepts/modules"
-[module]:"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/modules"
-{{% /docs/reference %}}
diff --git a/docs/sources/flow/reference/config-blocks/logging.md b/docs/sources/reference/config-blocks/logging.md
similarity index 85%
rename from docs/sources/flow/reference/config-blocks/logging.md
rename to docs/sources/reference/config-blocks/logging.md
index 23f3e84e90..090cd64ecc 100644
--- a/docs/sources/flow/reference/config-blocks/logging.md
+++ b/docs/sources/reference/config-blocks/logging.md
@@ -1,10 +1,7 @@
---
aliases:
-- /docs/grafana-cloud/agent/flow/reference/config-blocks/logging/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/logging/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/logging/
-- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/logging/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/logging/
+- ./reference/config-blocks/logging/
+canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/logging/
description: Learn about the logging configuration block
menuTitle: logging
title: logging block
@@ -50,15 +47,11 @@ The following strings are recognized as valid log line formats:
* `"logfmt"`: Write logs as [logfmt][] lines.
* `"json"`: Write logs as JSON objects.

-[logfmt]: https://brandur.org/logfmt
-
### Log receivers

The `write_to` argument allows {{< param "PRODUCT_NAME" >}} to tee its log entries to one or more `loki.*` component log receivers in addition to the default [location][].
For example, this can be the export of a `loki.write` component that ships log entries directly to Loki, or a `loki.relabel` component that adds a certain label first.
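A `logging` block that tees log entries to a `loki.write` component might look like the following sketch. The Loki endpoint URL is an illustrative placeholder:

```river
logging {
  level    = "info"
  format   = "logfmt"
  write_to = [loki.write.default.receiver]
}

// The receiver exported by this component collects the process's own logs.
loki.write "default" {
  endpoint {
    url = "https://loki.example.com/loki/api/v1/push"
  }
}
```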
-[location]: #log-location
-
## Log location

{{< param "PRODUCT_NAME" >}} writes all logs to `stderr`.

@@ -70,3 +63,6 @@ When running {{< param "PRODUCT_NAME" >}} as a container, view logs written to `
When running {{< param "PRODUCT_NAME" >}} as a Windows service, logs are instead written as event logs. You can view the logs through Event Viewer.

In other cases, redirect `stderr` of the {{< param "PRODUCT_NAME" >}} process to a file for logs to persist on disk.
+
+[logfmt]: https://brandur.org/logfmt
+[location]: #log-location
diff --git a/docs/sources/flow/reference/config-blocks/remotecfg.md b/docs/sources/reference/config-blocks/remotecfg.md
similarity index 71%
rename from docs/sources/flow/reference/config-blocks/remotecfg.md
rename to docs/sources/reference/config-blocks/remotecfg.md
index a175c9e169..233b350903 100644
--- a/docs/sources/flow/reference/config-blocks/remotecfg.md
+++ b/docs/sources/reference/config-blocks/remotecfg.md
@@ -1,10 +1,7 @@
---
aliases:
-- /docs/grafana-cloud/agent/flow/reference/config-blocks/remotecfg/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/remotecfg/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/remotecfg/
-- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/remotecfg/
-canonical: remotecfgs://grafana.com/docs/agent/latest/flow/reference/config-blocks/remotecfg/
+- ./reference/config-blocks/remotecfg/
+canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/remotecfg/
description: Learn about the remotecfg configuration block
menuTitle: remotecfg
title: remotecfg block
@@ -18,8 +15,7 @@ title: remotecfg block
The [API definition][] for managing and fetching configuration that the `remotecfg` block uses is available under the Apache 2.0 license.

> **BETA**: The `remotecfg` block enables [beta][] functionality.
-> Beta features are subject to breaking changes, and may be replaced with
-> equivalent functionality that cover the same use case.
+> Beta features are subject to breaking changes, and may be replaced with equivalent functionality that covers the same use case.

## Example

@@ -50,9 +46,7 @@ Name | Type | Description

If the `url` is not set, then the service block is a no-op.

-If not set, the self-reported `id` that the Agent uses is a randomly generated,
-anonymous unique ID (UUID) that is stored as an `agent_seed.json` file in the
-Agent's storage path so that it can persist across restarts.
+If not set, the self-reported `id` that {{< param "PRODUCT_NAME" >}} uses is a randomly generated, anonymous unique ID (UUID) that is stored as an `agent_seed.json` file in {{< param "PRODUCT_NAME" >}}'s storage path so that it can persist across restarts.

The `id` and `metadata` fields are used in the periodic request sent to the remote endpoint so that the API can decide what configuration to serve.
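A minimal sketch of a `remotecfg` block that sets an explicit `id` and `metadata` might look like the following. The URL and values are illustrative placeholders:

```river
remotecfg {
  url      = "https://config.example.com/api/v0"
  id       = "collector-eu-west-1"
  metadata = { "env" = "prod" }
}
```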
@@ -74,22 +68,22 @@ For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside

### basic_auth block

-{{< docs/shared lookup="flow/reference/components/basic-auth-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/basic-auth-block.md" source="alloy" version="" >}}

### authorization block

-{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/authorization-block.md" source="alloy" version="" >}}

### oauth2 block

-{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/oauth2-block.md" source="alloy" version="" >}}

### tls_config block

-{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}}
+{{< docs/shared lookup="reference/components/tls-config-block.md" source="alloy" version="" >}}

[API definition]: https://github.com/grafana/agent-remote-config
-[beta]: https://grafana.com/docs/agent//stability/#beta
+[beta]: ../../../stability/#beta
[basic_auth]: #basic_auth-block
[authorization]: #authorization-block
[oauth2]: #oauth2-block
diff --git a/docs/sources/flow/reference/config-blocks/tracing.md b/docs/sources/reference/config-blocks/tracing.md
similarity index 60%
rename from docs/sources/flow/reference/config-blocks/tracing.md
rename to docs/sources/reference/config-blocks/tracing.md
index 860c8e4c79..19bdcc28cc 100644
--- a/docs/sources/flow/reference/config-blocks/tracing.md
+++ b/docs/sources/reference/config-blocks/tracing.md
@@ -1,10 +1,7 @@
---
aliases:
-- /docs/grafana-cloud/agent/flow/reference/config-blocks/tracing/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/config-blocks/tracing/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/config-blocks/tracing/
-- /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/tracing/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/config-blocks/tracing/
+- ./reference/config-blocks/tracing/
+canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/tracing/
description: Learn about the tracing configuration block
menuTitle: tracing
title: tracing block
@@ -50,13 +47,12 @@ processing.

The elements in the array can be any `otelcol` component that accepts traces, including processors and exporters.
When `write_to` is set to an empty array `[]`, all traces are dropped.

-> **NOTE**: Any traces generated before the `tracing` block has been evaluated,
-> such as at the early start of the process' lifetime, are dropped.
+{{< admonition type="note" >}}
+Any traces generated before the `tracing` block has been evaluated, such as at the early start of the process' lifetime, are dropped.
+{{< /admonition >}}

-The `sampling_fraction` argument controls what percentage of generated traces
-should be sent to the consumers specified by `write_to`. When set to `1` or
-greater, 100% of traces are kept. When set to `0` or lower, 0% of traces are
-kept.
+The `sampling_fraction` argument controls what percentage of generated traces should be sent to the consumers specified by `write_to`.
+When set to `1` or greater, 100% of traces are kept. When set to `0` or lower, 0% of traces are kept.

## Blocks

Hierarchy | Block | Description | Required
-------------------------|-------------------|----------------------------------------------------------------|---------
sampler | [sampler][] | Define custom sampling on top of the base sampling fraction. | no
sampler > jaeger_remote | [jaeger_remote][] | Retrieve sampling information via a Jaeger remote sampler. | no

-The `>` symbol indicates deeper levels of nesting. For example, `sampler >
-jaeger_remote` refers to a `jaeger_remote` block defined inside an `sampler`
-block.
-
-[sampler]: #sampler-block
-[jaeger_remote]: #jaeger_remote-block
+The `>` symbol indicates deeper levels of nesting. For example, `sampler > jaeger_remote` refers to a `jaeger_remote` block defined inside a `sampler` block.

### sampler block

-The `sampler` block contains a definition of a custom sampler to use. The
-`sampler` block supports no arguments and is controlled fully through inner
-blocks.
+The `sampler` block contains a definition of a custom sampler to use.
+The `sampler` block supports no arguments and is controlled fully through inner blocks.

-It is invalid to define more than one sampler to use in the `sampler` block.
+It's invalid to define more than one sampler to use in the `sampler` block.

### jaeger_remote block

-The `jaeger_remote` block configures the retrieval of sampling information
-through a remote server that exposes Jaeger sampling strategies.
+The `jaeger_remote` block configures the retrieval of sampling information through a remote server that exposes Jaeger sampling strategies.

Name | Type | Description | Default | Required
-------------------|------------|------------------------------------------------------------|------------------------------------|---------
@@ -93,24 +82,23 @@ Name | Type | Description
`max_operations` | `number` | Limit number of operations which can have custom sampling. | `256` | no
`refresh_interval` | `duration` | Frequency to poll the URL for new sampling strategies. | `"1m"` | no

-The remote sampling strategies are retrieved from the URL specified by the
-`url` argument, and polled for updates on a timer. The frequency for how often
-polling occurs is controlled by the `refresh_interval` argument.
-
-Requests to the remote sampling strategies server are made through an HTTP
-`GET` request to the configured `url` argument. A `service=grafana-agent` query
-parameter is always added to the URL to allow the server to respond with
-service-specific strategies. The HTTP response body is read as JSON matching
-the schema specified by Jaeger's [`strategies.json` file][Jaeger sampling
-strategies].
-
-The `max_operations` limits the amount of custom span names that can have
-custom sampling rules. If the remote sampling strategy exceeds the limit,
-sampling decisions fall back to the default sampler.
+The remote sampling strategies are retrieved from the URL specified by the `url` argument, and polled for updates on a timer.
+The frequency for how often polling occurs is controlled by the `refresh_interval` argument.
+
+Requests to the remote sampling strategies server are made through an HTTP `GET` request to the configured `url` argument.
+A `service=grafana-agent` query parameter is always added to the URL to allow the server to respond with service-specific strategies.
+The HTTP response body is read as JSON matching the schema specified by Jaeger's [`strategies.json` file][Jaeger sampling strategies].
+
+The `max_operations` argument limits the number of custom span names that can have custom sampling rules.
+If the remote sampling strategy exceeds the limit, sampling decisions fall back to the default sampler.

[Jaeger sampling strategies]: https://www.jaegertracing.io/docs/1.22/sampling/#collector-sampling-configuration
+[sampler]: #sampler-block
+[jaeger_remote]: #jaeger_remote-block
diff --git a/docs/sources/reference/stdlib/_index.md b/docs/sources/reference/stdlib/_index.md
new file mode 100644
index 0000000000..489dfadadb
--- /dev/null
+++ b/docs/sources/reference/stdlib/_index.md
@@ -0,0 +1,19 @@
+---
+aliases:
+- ./reference/stdlib/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/
+description: The standard library is a list of functions used in expressions when assigning values to attributes
+title: Standard library
+weight: 400
+---
+
+# Standard library
+
+The standard library is a list of functions you can use in expressions when assigning values to attributes.
+
+All standard library functions are [pure functions][].
+The functions always return the same output if given the same input.
+
+{{< section >}}
+
+[pure functions]: https://en.wikipedia.org/wiki/Pure_function
diff --git a/docs/sources/reference/stdlib/coalesce.md b/docs/sources/reference/stdlib/coalesce.md
new file mode 100644
index 0000000000..a089644c90
--- /dev/null
+++ b/docs/sources/reference/stdlib/coalesce.md
@@ -0,0 +1,24 @@
+---
+aliases:
+- ./reference/stdlib/coalesce/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/coalesce/
+description: Learn about coalesce
+title: coalesce
+---
+
+# coalesce
+
+`coalesce` takes any number of arguments and returns the first one that isn't null, an empty string, an empty list, or an empty object.
+It's useful for obtaining a default value, such as if an environment variable isn't defined.
+If no argument is non-empty or non-zero, the last argument is returned.
+
+## Examples
+
+```
+> coalesce("a", "b")
+a
+> coalesce("", "b")
+b
+> coalesce(env("DOES_NOT_EXIST"), "c")
+c
+```
diff --git a/docs/sources/reference/stdlib/concat.md b/docs/sources/reference/stdlib/concat.md
new file mode 100644
index 0000000000..c233fbaa2c
--- /dev/null
+++ b/docs/sources/reference/stdlib/concat.md
@@ -0,0 +1,29 @@
+---
+aliases:
+- ./reference/stdlib/concat/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/concat/
+description: Learn about concat
+title: concat
+---
+
+# concat
+
+The `concat` function concatenates one or more lists of values into a single list.
+Each argument to `concat` must be a list value.
+Elements within the list can be any type.
+
+## Examples
+
+```
+> concat([])
+[]
+
+> concat([1, 2], [3, 4])
+[1, 2, 3, 4]
+
+> concat([1, 2], [], [bool, null])
+[1, 2, bool, null]
+
+> concat([[1, 2], [3, 4]], [[5, 6]])
+[[1, 2], [3, 4], [5, 6]]
+```
diff --git a/docs/sources/reference/stdlib/constants.md b/docs/sources/reference/stdlib/constants.md
new file mode 100644
index 0000000000..3e9e394af2
--- /dev/null
+++ b/docs/sources/reference/stdlib/constants.md
@@ -0,0 +1,28 @@
+---
+aliases:
+- ./reference/stdlib/constants/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/constants/
+description: Learn about constants
+title: constants
+---
+
+# constants
+
+The `constants` object exposes a list of constant values about the system {{< param "PRODUCT_NAME" >}} is running on:
+
+* `constants.hostname`: The hostname of the machine {{< param "PRODUCT_NAME" >}} is running on.
+* `constants.os`: The operating system {{< param "PRODUCT_NAME" >}} is running on.
+* `constants.arch`: The architecture of the system {{< param "PRODUCT_NAME" >}} is running on.
+
+## Examples
+
+```
+> constants.hostname
+"my-hostname"
+
+> constants.os
+"linux"
+
+> constants.arch
+"amd64"
+```
diff --git a/docs/sources/reference/stdlib/env.md b/docs/sources/reference/stdlib/env.md
new file mode 100644
index 0000000000..84a68ed3b9
--- /dev/null
+++ b/docs/sources/reference/stdlib/env.md
@@ -0,0 +1,22 @@
+---
+aliases:
+- ./reference/stdlib/env/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/env/
+description: Learn about env
+title: env
+---
+
+# env
+
+The `env` function gets the value of an environment variable from the system {{< param "PRODUCT_NAME" >}} is running on.
+If the environment variable doesn't exist, `env` returns an empty string.
+
+## Examples
+
+```
+> env("HOME")
+"/home/grafana-agent"
+
+> env("DOES_NOT_EXIST")
+""
+```
diff --git a/docs/sources/flow/reference/stdlib/format.md b/docs/sources/reference/stdlib/format.md
similarity index 66%
rename from docs/sources/flow/reference/stdlib/format.md
rename to docs/sources/reference/stdlib/format.md
index be5d9cd754..4f5d227cd8 100644
--- a/docs/sources/flow/reference/stdlib/format.md
+++ b/docs/sources/reference/stdlib/format.md
@@ -1,20 +1,15 @@
---
aliases:
-- ../../configuration-language/standard-library/format/
-- /docs/grafana-cloud/agent/flow/reference/stdlib/format/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/stdlib/format/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/stdlib/format/
-- /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/format/
-canonical: https://grafana.com/docs/agent/latest/flow/reference/stdlib/format/
+- ./reference/stdlib/format/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/format/
description: Learn about format
title: format
---

# format

-The `format` function produces a string by formatting a number of other values according
-to a specification string. It is similar to the `printf` function in C, and
-other similar functions in other programming languages.
+The `format` function produces a string by formatting a number of other values according to a specification string.
+It's similar to the `printf` function in C and to equivalent functions in other programming languages.

```river
format(spec, values...)
@@ -33,21 +28,16 @@ The `format` function is most useful when you use more complex format specificat

## Specification Syntax

-The specification is a string that includes formatting verbs that are introduced
-with the `%` character. The function call must then have one additional argument
-for each verb sequence in the specification. The verbs are matched with
-consecutive arguments and formatted as directed, as long as each given argument
-is convertible to the type required by the format verb.
+The specification is a string that includes formatting verbs that are introduced with the `%` character.
+The function call must then have one additional argument for each verb sequence in the specification.
+The verbs are matched with consecutive arguments and formatted as directed, as long as each given argument is convertible to the type required by the format verb.

By default, `%` sequences consume successive arguments starting with the first.
-Introducing a `[n]` sequence immediately before the verb letter, where `n` is a
-decimal integer, explicitly chooses a particular value argument by its
-one-based index. Subsequent calls without an explicit index will then proceed
-with `n`+1, `n`+2, etc.
+Introducing a `[n]` sequence immediately before the verb letter, where `n` is a decimal integer, explicitly chooses a particular value argument by its one-based index.
+Subsequent calls without an explicit index will then proceed with `n`+1, `n`+2, etc.

-The function produces an error if the format string requests an impossible
-conversion or accesses more arguments than are given. An error is also produced
-for an unsupported format verb.
+The function produces an error if the format string requests an impossible conversion or accesses more arguments than are given.
+An error is also produced for an unsupported format verb.

### Verbs

diff --git a/docs/sources/reference/stdlib/join.md b/docs/sources/reference/stdlib/join.md
new file mode 100644
index 0000000000..fdec1cbf0e
--- /dev/null
+++ b/docs/sources/reference/stdlib/join.md
@@ -0,0 +1,26 @@
+---
+aliases:
+- ./reference/stdlib/join/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/join/
+description: Learn about join
+title: join
+---
+
+# join
+
+`join` concatenates all items in an array into a string, using a character as the separator.
+
+```river
+join(list, separator)
+```
+
+## Examples
+
+```river
+> join(["foo", "bar", "baz"], "-")
+"foo-bar-baz"
+> join(["foo", "bar", "baz"], ", ")
+"foo, bar, baz"
+> join(["foo"], ", ")
+"foo"
+```
diff --git a/docs/sources/reference/stdlib/json_decode.md b/docs/sources/reference/stdlib/json_decode.md
new file mode 100644
index 0000000000..6bc68ca250
--- /dev/null
+++ b/docs/sources/reference/stdlib/json_decode.md
@@ -0,0 +1,41 @@
+---
+aliases:
+- ./reference/stdlib/json_decode/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/json_decode/
+description: Learn about json_decode
+title: json_decode
+---
+
+# json_decode
+
+The `json_decode` function decodes a string representing JSON into a River value.
+`json_decode` fails if the string argument provided can't be parsed as JSON.
+
+A common use case of `json_decode` is to decode the output of a [`local.file`][] component to a River value.
+
+> Remember to escape double quotes when passing JSON string literals to `json_decode`.
+>
+> For example, the JSON value `{"key": "value"}` is properly represented by the string `"{\"key\": \"value\"}"`.
+
+## Examples
+
+```
+> json_decode("15")
+15
+
+> json_decode("[1, 2, 3]")
+[1, 2, 3]
+
+> json_decode("null")
+null
+
+> json_decode("{\"key\": \"value\"}")
+{
+  key = "value",
+}
+
+> json_decode(local.file.some_file.content)
+"Hello, world!"
+```
+
+[`local.file`]: ../../components/local.file/
diff --git a/docs/sources/reference/stdlib/json_path.md b/docs/sources/reference/stdlib/json_path.md
new file mode 100644
index 0000000000..dda12738a0
--- /dev/null
+++ b/docs/sources/reference/stdlib/json_path.md
@@ -0,0 +1,43 @@
+---
+aliases:
+- ./reference/stdlib/json_path/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/json_path/
+description: Learn about json_path
+title: json_path
+---
+
+# json_path
+
+The `json_path` function looks up values using [jsonpath][] syntax.
+
+The function expects two strings. The first string is the JSON string used to look up values. The second string is the JSONPath expression.
+
+`json_path` always returns a list of values. If the JSONPath expression doesn't match any values, an empty list is returned.
+
+A common use case of `json_path` is to decode and filter the output of a [`local.file`][] or [`remote.http`][] component to a River value.
+
+> Remember to escape double quotes when passing JSON string literals to `json_path`.
+>
+> For example, the JSON value `{"key": "value"}` is properly represented by the string `"{\"key\": \"value\"}"`.
+
+## Examples
+
+```
+> json_path("{\"key\": \"value\"}", ".key")
+["value"]
+
+> json_path("[{\"name\": \"Department\",\"value\": \"IT\"},{\"name\":\"TestStatus\",\"value\":\"Pending\"}]", "[?(@.name == \"Department\")].value")
+["IT"]
+
+> json_path("{\"key\": \"value\"}", ".nonexists")
+[]
+
+> json_path("{\"key\": \"value\"}", ".key")[0]
+value
+```
+
+[jsonpath]: https://goessner.net/articles/JsonPath/
+[`local.file`]: ../../components/local.file/
+[`remote.http`]: ../../components/remote.http/
diff --git a/docs/sources/reference/stdlib/nonsensitive.md b/docs/sources/reference/stdlib/nonsensitive.md
new file mode 100644
index 0000000000..bacea7271e
--- /dev/null
+++ b/docs/sources/reference/stdlib/nonsensitive.md
@@ -0,0 +1,30 @@
+---
+aliases:
+- ./reference/stdlib/nonsensitive/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/nonsensitive/
+description: Learn about nonsensitive
+title: nonsensitive
+---
+
+# nonsensitive
+
+`nonsensitive` converts a [secret][] value back into a string.
+
+{{< admonition type="warning" >}}
+Only use `nonsensitive` when you are positive that the value converted back to a string isn't a sensitive value.
+
+Strings resulting from calls to `nonsensitive` are displayed in plain text in the UI and internal API calls.
+{{< /admonition >}}
+
+## Examples
+
+```
+// Assuming `sensitive_value` is a secret:
+
+> sensitive_value
+(secret)
+> nonsensitive(sensitive_value)
+"Hello, world!"
+```
+
+[secret]: ../../../concepts/config-language/expressions/types_and_values/#secrets
diff --git a/docs/sources/reference/stdlib/replace.md b/docs/sources/reference/stdlib/replace.md
new file mode 100644
index 0000000000..89722e6364
--- /dev/null
+++ b/docs/sources/reference/stdlib/replace.md
@@ -0,0 +1,22 @@
+---
+aliases:
+- ./reference/stdlib/replace/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/replace/
+description: Learn about replace
+title: replace
+---
+
+# replace
+
+`replace` searches a string for a substring, and replaces each occurrence of the substring with a replacement string.
+
+```river
+replace(string, substring, replacement)
+```
+
+## Examples
+
+```river
+> replace("1 + 2 + 3", "+", "-")
+"1 - 2 - 3"
+```
diff --git a/docs/sources/reference/stdlib/split.md b/docs/sources/reference/stdlib/split.md
new file mode 100644
index 0000000000..c033739f90
--- /dev/null
+++ b/docs/sources/reference/stdlib/split.md
@@ -0,0 +1,28 @@
+---
+aliases:
+- ./reference/stdlib/split/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/split/
+description: Learn about split
+title: split
+---
+
+# split
+
+`split` produces a list by dividing a string at all occurrences of a separator.
+
+```river
+split(string, separator)
+```
+
+## Examples
+
+```river
+> split("foo,bar,baz", ",")
+["foo", "bar", "baz"]
+
+> split("foo", ",")
+["foo"]
+
+> split("", ",")
+[""]
+```
diff --git a/docs/sources/reference/stdlib/to_lower.md b/docs/sources/reference/stdlib/to_lower.md
new file mode 100644
index 0000000000..27af0825fb
--- /dev/null
+++ b/docs/sources/reference/stdlib/to_lower.md
@@ -0,0 +1,18 @@
+---
+aliases:
+- ./reference/stdlib/to_lower/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/to_lower/
+description: Learn about to_lower
+title: to_lower
+---
+
+# to_lower
+
+`to_lower` converts all uppercase letters in a string to lowercase.
+
+## Examples
+
+```river
+> to_lower("HELLO")
+"hello"
+```
diff --git a/docs/sources/reference/stdlib/to_upper.md b/docs/sources/reference/stdlib/to_upper.md
new file mode 100644
index 0000000000..ee8c1509bf
--- /dev/null
+++ b/docs/sources/reference/stdlib/to_upper.md
@@ -0,0 +1,18 @@
+---
+aliases:
+- ./reference/stdlib/to_upper/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/to_upper/
+description: Learn about to_upper
+title: to_upper
+---
+
+# to_upper
+
+`to_upper` converts all lowercase letters in a string to uppercase.
+
+## Examples
+
+```river
+> to_upper("hello")
+"HELLO"
+```
diff --git a/docs/sources/reference/stdlib/trim.md b/docs/sources/reference/stdlib/trim.md
new file mode 100644
index 0000000000..5f904df6a4
--- /dev/null
+++ b/docs/sources/reference/stdlib/trim.md
@@ -0,0 +1,28 @@
+---
+aliases:
+- ./reference/stdlib/trim/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/trim/
+description: Learn about trim
+title: trim
+---
+
+# trim
+
+`trim` removes the specified set of characters from the start and end of a string.
+
+```river
+trim(string, str_character_set)
+```
+
+## Examples
+
+```river
+> trim("?!hello?!", "!?")
+"hello"
+
+> trim("foobar", "far")
+"oob"
+
+> trim(" hello! world.! ", "! ")
+"hello! world."
+```
diff --git a/docs/sources/reference/stdlib/trim_prefix.md b/docs/sources/reference/stdlib/trim_prefix.md
new file mode 100644
index 0000000000..6bb900f2dd
--- /dev/null
+++ b/docs/sources/reference/stdlib/trim_prefix.md
@@ -0,0 +1,19 @@
+---
+aliases:
+- ./reference/stdlib/trim_prefix/
+canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/trim_prefix/
+description: Learn about trim_prefix
+title: trim_prefix
+---
+
+# trim_prefix
+
+`trim_prefix` removes the prefix from the start of a string.
+If the string doesn't start with the prefix, the string is returned unchanged.
+ +## Examples + +```river +> trim_prefix("helloworld", "hello") +"world" +``` diff --git a/docs/sources/reference/stdlib/trim_space.md b/docs/sources/reference/stdlib/trim_space.md new file mode 100644 index 0000000000..06646e56af --- /dev/null +++ b/docs/sources/reference/stdlib/trim_space.md @@ -0,0 +1,18 @@ +--- +aliases: +- ./reference/stdlib/trim_space/ +canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/trim_space/ +description: Learn about trim_space +title: trim_space +--- + +# trim_space + +`trim_space` removes any whitespace characters from the start and end of a string. + +## Examples + +```river +> trim_space(" hello\n\n") +"hello" +``` diff --git a/docs/sources/reference/stdlib/trim_suffix.md b/docs/sources/reference/stdlib/trim_suffix.md new file mode 100644 index 0000000000..d13c596230 --- /dev/null +++ b/docs/sources/reference/stdlib/trim_suffix.md @@ -0,0 +1,18 @@ +--- +aliases: +- ./reference/stdlib/trim_suffix/ +canonical: https://grafana.com/docs/alloy/latest/reference/stdlib/trim_suffix/ +description: Learn about trim_suffix +title: trim_suffix +--- + +# trim_suffix + +`trim_suffix` removes the suffix from the end of a string. + +## Examples + +```river +> trim_suffix("helloworld", "world") +"hello" +``` diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md new file mode 100644 index 0000000000..a665a5010c --- /dev/null +++ b/docs/sources/release-notes.md @@ -0,0 +1,15 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/release-notes/ +description: Release notes for Grafana Alloy +menuTitle: Release notes +title: Release notes for Grafana Alloy +weight: 999 +--- + +# Release notes for {{% param "PRODUCT_NAME" %}} + +The release notes provide information about deprecations and breaking changes in {{< param "PRODUCT_NAME" >}}. + +For a complete list of changes to {{< param "PRODUCT_ROOT_NAME" >}}, with links to pull requests and related issues when available, refer to the [Changelog][]. + +[Changelog]: https://github.com/grafana/alloy/blob/main/CHANGELOG.md \ No newline at end of file diff --git a/docs/sources/shared/deploy-agent.md b/docs/sources/shared/deploy-agent.md deleted file mode 100644 index 1799ea1745..0000000000 --- a/docs/sources/shared/deploy-agent.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -aliases: -- /docs/agent/shared/deploy-agent/ -- /docs/grafana-cloud/agent/shared/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/deploy-agent/ -- /docs/grafana-cloud/send-data/agent/shared/deploy-agent/ -canonical: https://grafana.com/docs/agent/latest/shared/deploy-agent/ -description: Shared content, deployment topologies for Grafana Agent -headless: true -title: Deploy Grafana Agent ---- - -# Deploy Grafana Agent - -Grafana Agent is a flexible, vendor-neutral telemetry collector. This -flexibility means that Grafana Agent doesn’t enforce a specific deployment topology -but can work in multiple scenarios. - -This page lists common topologies used for deployments of Grafana Agent, when -to consider using each topology, issues you may run into, and scaling -considerations. - -## As a centralized collection service -Deploying Grafana Agent as a centralized service is recommended for -collecting application telemetry. This topology allows you to use a smaller number of agents to -coordinate service discovery, collection, and remote writing. 
- -![centralized-collection](/media/docs/agent/agent-topologies/centralized-collection.png) - -Using this topology requires deploying the Agent on separate infrastructure, -and making sure that agents can discover and reach these applications over the -network. The main predictor for the size of the agent is the number of active -metrics series it is scraping; a rule of thumb is approximately 10 KB of memory for each -series. We recommend you start looking towards horizontal scaling around the 1 million -active series mark. - -### Using Kubernetes StatefulSets -Deploying Grafana Agent as a StatefulSet is the recommended option for metrics -collection. -The persistent pod identifiers make it possible to consistently match volumes -with pods so that you can use them for the WAL directory. - -You can also use a Kubernetes deployment in cases where persistent storage is not required, such as a traces-only pipeline. - -### Pros -* Straightforward scaling using [clustering][] or [hashmod sharding][] -* Minimizes the “noisy neighbor” effect -* Easy to meta-monitor - -### Cons -* Requires running on separate infrastructure - -### Use for -* Scalable telemetry collection - -### Don’t use for -* Host-level metrics and logs - -## As a host daemon -Deploying one Grafana Agent per machine is required for collecting -machine-level metrics and logs, such as node_exporter hardware and network -metrics or journald system logs. - -![daemonset](/media/docs/agent/agent-topologies/daemonset.png) - -Each Grafana Agent requires you to open an outgoing connection for each remote endpoint -it’s shipping data to. This can lead to NAT port exhaustion on the egress -infrastructure. Each egress IP can support up to (65535 - 1024 = 64511) -outgoing connections on different ports. So, if all agents are shipping metrics -and log data, an egress IP can support up to 32,255 agents. - -### Using Kubernetes DaemonSets -The simplest use case of the host daemon topology is a Kubernetes DaemonSet, -and it is required for node-level observability (for example cAdvisor metrics) and -collecting pod logs. - -### Pros -* Doesn’t require running on separate infrastructure -* Typically leads to smaller-sized agents -* Lower network latency to instrumented applications - -### Cons -* Requires planning a process for provisioning Grafana Agent on new machines, as well as keeping configuration up to date to avoid configuration drift -* Not possible to scale agents independently when using Kubernetes DaemonSets -* Scaling the topology can strain external APIs (like service discovery) and network infrastructure (like firewalls, proxy servers, and egress points) - -### Use for -* Collecting machine-level metrics and logs (for example, node_exporter hardware metrics, Kubernetes pod logs) - -### Don’t use for -* Scenarios where Grafana Agent grows so large it can become a noisy neighbor -* Collecting an unpredictable amount of telemetry - -## As a container sidecar -Deploying Grafana Agent as a container sidecar is only recommended for -short-lived applications or specialized agent deployments. - -![daemonset](/media/docs/agent/agent-topologies/sidecar.png) - -### Using Kubernetes pod sidecars -In a Kubernetes environment, the sidecar model consists of deploying Grafana Agent -as an extra container on the pod. The pod’s controller, network configuration, -enabled capabilities, and available resources are shared between the actual -application and the sidecar agent. 
### Pros
-* Doesn’t require running on separate infrastructure
-* Straightforward networking with partner applications
-
-### Cons
-* Doesn’t scale separately
-* Makes resource consumption harder to monitor and predict
-* Agents do not have a life cycle of their own, making it harder to reason about things like recovering from network outages
-
-### Use for
-* Serverless services
-* Job/batch applications that work with a push model
-* Air-gapped applications that can’t be otherwise reached over the network
-
-### Don’t use for
-* Long-lived applications
-* Scenarios where the agent size grows so large it can become a noisy neighbor
-
-[hashmod sharding]: {{< relref "../static/operation-guide/_index.md" >}}
-[clustering]: {{< relref "../flow/concepts/clustering.md" >}}
diff --git a/docs/sources/shared/deploy-alloy.md b/docs/sources/shared/deploy-alloy.md
new file mode 100644
index 0000000000..6c86f737ba
--- /dev/null
+++ b/docs/sources/shared/deploy-alloy.md
@@ -0,0 +1,123 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/shared/deploy-alloy/
+description: Shared content, deployment topologies for Grafana Alloy
+headless: true
+title: Deploy Grafana Alloy
+---
+
+# Deploy {{% param "PRODUCT_NAME" %}}
+
+{{< param "PRODUCT_NAME" >}} is a flexible, vendor-neutral telemetry collector.
+This flexibility means that {{< param "PRODUCT_NAME" >}} doesn’t enforce a specific deployment topology but can work in multiple scenarios.
+
+This page lists common topologies used for deployments of {{< param "PRODUCT_NAME" >}}, when to consider using each topology, issues you may run into, and scaling considerations.
+
+## As a centralized collection service
+
+Deploying {{< param "PRODUCT_NAME" >}} as a centralized service is recommended for collecting application telemetry.
+This topology allows you to use a smaller number of {{< param "PRODUCT_NAME" >}} instances to coordinate service discovery, collection, and remote writing.
+
+![centralized-collection](/media/docs/agent/agent-topologies/centralized-collection.png)
+
+Using this topology requires deploying {{< param "PRODUCT_NAME" >}} on separate infrastructure, and making sure that instances can discover and reach these applications over the network.
+The main predictor for the size of each instance is the number of active metrics series it's scraping; a rule of thumb is approximately 10 KB of memory for each series.
+We recommend you start looking towards horizontal scaling around the 1 million active series mark.
+
+### Using Kubernetes StatefulSets
+
+Deploying {{< param "PRODUCT_NAME" >}} as a StatefulSet is the recommended option for metrics collection.
+The persistent Pod identifiers make it possible to consistently match volumes with pods so that you can use them for the WAL directory.
+
+You can also use a Kubernetes Deployment in cases where persistent storage isn't required, such as a traces-only pipeline.
+
+### Pros
+
+* Straightforward scaling using [clustering][] or [hashmod sharding][]
+* Minimizes the “noisy neighbor” effect
+* Easy to meta-monitor
+
+### Cons
+
+* Requires running on separate infrastructure
+
+### Use for
+
+* Scalable telemetry collection
+
+### Don’t use for
+
+* Host-level metrics and logs
+
+## As a host daemon
+
+Deploying one {{< param "PRODUCT_NAME" >}} instance per machine is required for collecting machine-level metrics and logs, such as node_exporter hardware and network metrics or journald system logs.
+
+![daemonset](/media/docs/agent/agent-topologies/daemonset.png)
+
+Each {{< param "PRODUCT_NAME" >}} instance requires you to open an outgoing connection for each remote endpoint it’s shipping data to.
+This can lead to NAT port exhaustion on the egress infrastructure.
+Each egress IP can support up to (65535 - 1024 = 64511) outgoing connections on different ports.
+So, if all {{< param "PRODUCT_NAME" >}} instances are shipping metrics and log data, an egress IP can support up to 32,255 instances.
+
+### Using Kubernetes DaemonSets
+
+The simplest use case of the host daemon topology is a Kubernetes DaemonSet, and it's required for node-level observability (for example cAdvisor metrics) and collecting Pod logs.
+
+### Pros
+
+* Doesn’t require running on separate infrastructure
+* Typically leads to smaller-sized instances
+* Lower network latency to instrumented applications
+
+### Cons
+
+* Requires planning a process for provisioning {{< param "PRODUCT_NAME" >}} on new machines, as well as keeping configuration up to date to avoid configuration drift
+* Not possible to scale instances independently when using Kubernetes DaemonSets
+* Scaling the topology can strain external APIs (like service discovery) and network infrastructure (like firewalls, proxy servers, and egress points)
+
+### Use for
+
+* Collecting machine-level metrics and logs (for example, node_exporter hardware metrics, Kubernetes Pod logs)
+
+### Don’t use for
+
+* Scenarios where {{< param "PRODUCT_NAME" >}} grows so large it can become a noisy neighbor
+* Collecting an unpredictable amount of telemetry
+
+## As a container sidecar
+
+Deploying {{< param "PRODUCT_NAME" >}} as a container sidecar is only recommended for short-lived applications or specialized {{< param "PRODUCT_NAME" >}} deployments.
+
+![daemonset](/media/docs/agent/agent-topologies/sidecar.png)
+
+### Using Kubernetes Pod sidecars
+
+In a Kubernetes environment, the sidecar model consists of deploying {{< param "PRODUCT_NAME" >}} as an extra container on the Pod.
+The Pod’s controller, network configuration, enabled capabilities, and available resources are shared between the actual application and the sidecar.
+
+### Pros
+
+* Doesn’t require running on separate infrastructure
+* Straightforward networking with partner applications
+
+### Cons
+
+* Doesn’t scale separately
+* Makes resource consumption harder to monitor and predict
+* {{< param "PRODUCT_NAME" >}} instances don't have a life cycle of their own, making it harder to reason about things like recovering from network outages
+
+### Use for
+
+* Serverless services
+* Job/batch applications that work with a push model
+* Air-gapped applications that can’t be otherwise reached over the network
+
+### Don’t use for
+
+* Long-lived applications
+* Scenarios where the {{< param "PRODUCT_NAME" >}} instance grows so large it can become a noisy neighbor
+
+
+[hashmod sharding]: https://grafana.com/docs/agent/latest/static/operation-guide/
+
+[clustering]: ../../concepts/clustering/
diff --git a/docs/sources/shared/flow/reference/components/azuread-block.md b/docs/sources/shared/flow/reference/components/azuread-block.md
deleted file mode 100644
index 07d9743851..0000000000
--- a/docs/sources/shared/flow/reference/components/azuread-block.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-aliases:
-- /docs/agent/shared/flow/reference/components/azuread-block/
-- /docs/grafana-cloud/agent/shared/flow/reference/components/azuread-block/
-- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/azuread-block/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/azuread-block/
-- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/azuread-block/
-canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/azuread-block/
-description: Shared content, azuread block
-headless: true
----
-
-Name | Type | Description | Default | Required
---------|----------|------------------|-----------------|---------
-`cloud` | `string` | The Azure Cloud. | `"AzurePublic"` | no
-
-The supported values for `cloud` are:
-* `"AzurePublic"`
-* `"AzureChina"`
-* `"AzureGovernment"`
diff --git a/docs/sources/shared/flow/reference/components/exporter-component-exports.md b/docs/sources/shared/flow/reference/components/exporter-component-exports.md
deleted file mode 100644
index f1a8ca440c..0000000000
--- a/docs/sources/shared/flow/reference/components/exporter-component-exports.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-aliases:
-- /docs/agent/shared/flow/reference/components/exporter-component-exports/
-- /docs/grafana-cloud/agent/shared/flow/reference/components/exporter-component-exports/
-- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/exporter-component-exports/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/exporter-component-exports/
-- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/exporter-component-exports/
-canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/exporter-component-exports/
-description: Shared content, exporter component exports
-headless: true
----
-
-The following fields are exported and can be referenced by other components.
-
-Name | Type | Description
-----------|---------------------|----------------------------------------------------------
-`targets` | `list(map(string))` | The targets that can be used to collect exporter metrics.
-
- -The exported targets use the configured [in-memory traffic][] address specified by the [run command][]. - -[in-memory traffic]: {{< relref "../../../../flow/concepts/component_controller.md#in-memory-traffic" >}} -[run command]: {{< relref "../../../../flow/reference/cli/run.md" >}} diff --git a/docs/sources/shared/flow/reference/components/http-client-proxy-config-description.md b/docs/sources/shared/flow/reference/components/http-client-proxy-config-description.md deleted file mode 100644 index 700b0dd2cc..0000000000 --- a/docs/sources/shared/flow/reference/components/http-client-proxy-config-description.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -aliases: -- /docs/agent/shared/flow/reference/components/http-client-proxy-config-description-args/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/http-client-proxy-config-description-args/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/http-client-proxy-config-description-args/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/http-client-proxy-config-description-args/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/http-client-proxy-config-description-args/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/http-client-proxy-config-description-args/ -description: Shared content, http client config description -headless: true ---- - -`no_proxy` can contain IPs, CIDR notations, and domain names. IP and domain -names can contain port numbers. `proxy_url` must be configured if `no_proxy` -is configured. - -`proxy_from_environment` uses the environment variables HTTP_PROXY, HTTPS_PROXY -and NO_PROXY (or the lowercase versions thereof). Requests use the proxy from -the environment variable matching their scheme, unless excluded by NO_PROXY. -`proxy_url` and `no_proxy` must not be configured if `proxy_from_environment` -is configured. - -`proxy_connect_header` should only be configured if `proxy_url` or `proxy_from_environment` are configured. \ No newline at end of file diff --git a/docs/sources/shared/flow/reference/components/otelcol-compression-field.md b/docs/sources/shared/flow/reference/components/otelcol-compression-field.md deleted file mode 100644 index 394cf1077c..0000000000 --- a/docs/sources/shared/flow/reference/components/otelcol-compression-field.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-compression-field/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-compression-field/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-compression-field/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-compression-field/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-compression-field/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/otelcol-compression-field/ -description: Shared content, otelcol compression field -headless: true ---- - -By default, requests are compressed with gzip. -The `compression` argument controls which compression mechanism to use. Supported strings are: - -* `"gzip"` -* `"zlib"` -* `"deflate"` -* `"snappy"` -* `"zstd"` - -If `compression` is set to `"none"` or an empty string `""`, no compression is used. 
diff --git a/docs/sources/shared/flow/stability/beta.md b/docs/sources/shared/flow/stability/beta.md deleted file mode 100644 index c337059f00..0000000000 --- a/docs/sources/shared/flow/stability/beta.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -aliases: -- /docs/agent/shared/flow/stability/beta/ -- /docs/grafana-cloud/agent/shared/flow/stability/beta/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/stability/beta/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/stability/beta/ -- /docs/grafana-cloud/send-data/agent/shared/flow/stability/beta/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/stability/beta/ -description: Shared content, beta -headless: true ---- - -> **BETA**: This is a [beta][] component. Beta components are subject to breaking -> changes, and may be replaced with equivalent functionality that cover the -> same use case. - -[beta]: {{< relref "../../../stability.md#beta" >}} diff --git a/docs/sources/shared/flow/stability/experimental.md b/docs/sources/shared/flow/stability/experimental.md deleted file mode 100644 index 95d0136400..0000000000 --- a/docs/sources/shared/flow/stability/experimental.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -aliases: -- /docs/agent/shared/flow/stability/experimental/ -- /docs/grafana-cloud/agent/shared/flow/stability/experimental/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/stability/experimental/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/stability/experimental/ -- /docs/grafana-cloud/send-data/agent/shared/flow/stability/experimental/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/stability/experimental/ -description: Shared content, experimental -headless: true ---- - -> **EXPERIMENTAL**: This is an [experimental][] component. Experimental -> components are subject to frequent breaking changes, and may be removed with -> no equivalent replacement. 
- -[experimental]: {{< relref "../../../stability.md#experimental" >}} diff --git a/docs/sources/shared/index.md b/docs/sources/shared/index.md index 8b1094f12e..c061db6fd4 100644 --- a/docs/sources/shared/index.md +++ b/docs/sources/shared/index.md @@ -1,8 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/ -- /docs/grafana-cloud/send-data/agent/shared/ -canonical: https://grafana.com/docs/agent/latest/shared/ +canonical: https://grafana.com/docs/alloy/latest/shared/ description: Shared content headless: true --- diff --git a/docs/sources/shared/flow/reference/components/authorization-block.md b/docs/sources/shared/reference/components/authorization-block.md similarity index 53% rename from docs/sources/shared/flow/reference/components/authorization-block.md rename to docs/sources/shared/reference/components/authorization-block.md index 11a74326f9..2c1f8a4354 100644 --- a/docs/sources/shared/flow/reference/components/authorization-block.md +++ b/docs/sources/shared/reference/components/authorization-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/authorization-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/authorization-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/authorization-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/authorization-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/authorization-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/authorization-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/authorization-block/ description: Shared content, authorization block headless: true --- diff --git a/docs/sources/shared/reference/components/azuread-block.md b/docs/sources/shared/reference/components/azuread-block.md new file mode 100644 index 0000000000..461402a5c9 --- /dev/null +++ b/docs/sources/shared/reference/components/azuread-block.md @@ -0,0 +1,14 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/azuread-block/ +description: Shared content, azuread block +headless: true +--- + +Name | Type | Description | Default | Required +--------|----------|------------------|-----------------|--------- +`cloud` | `string` | The Azure Cloud. 
| `"AzurePublic"` | no + +The supported values for `cloud` are: +* `"AzurePublic"` +* `"AzureChina"` +* `"AzureGovernment"` diff --git a/docs/sources/shared/flow/reference/components/basic-auth-block.md b/docs/sources/shared/reference/components/basic-auth-block.md similarity index 52% rename from docs/sources/shared/flow/reference/components/basic-auth-block.md rename to docs/sources/shared/reference/components/basic-auth-block.md index 62f7e0a25d..8ff77ae4e6 100644 --- a/docs/sources/shared/flow/reference/components/basic-auth-block.md +++ b/docs/sources/shared/reference/components/basic-auth-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/basic-auth-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/basic-auth-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/basic-auth-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/basic-auth-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/basic-auth-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/basic-auth-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/basic-auth-block/ description: Shared content, basic auth block headless: true --- diff --git a/docs/sources/shared/reference/components/exporter-component-exports.md b/docs/sources/shared/reference/components/exporter-component-exports.md new file mode 100644 index 0000000000..56867bfd0b --- /dev/null +++ b/docs/sources/shared/reference/components/exporter-component-exports.md @@ -0,0 +1,18 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/exporter-component-exports/ +description: Shared content, exporter component exports +headless: true +--- + +The following fields are exported and can be referenced by other components. + +Name | Type | Description +----------|---------------------|---------------------------------------------------------- +`targets` | `list(map(string))` | The targets that can be used to collect exporter metrics. + +For example, the `targets` can either be passed to a `discovery.relabel` component to rewrite the targets' label sets or to a `prometheus.scrape` component that collects the exposed metrics. + +The exported targets use the configured [in-memory traffic][] address specified by the [run command][]. 
+ +[in-memory traffic]: ../../../concepts/component_controller/#in-memory-traffic +[run command]: ../../../reference/cli/run/ diff --git a/docs/sources/shared/flow/reference/components/extract-field-block.md b/docs/sources/shared/reference/components/extract-field-block.md similarity index 78% rename from docs/sources/shared/flow/reference/components/extract-field-block.md rename to docs/sources/shared/reference/components/extract-field-block.md index 207f2bc605..5946439435 100644 --- a/docs/sources/shared/flow/reference/components/extract-field-block.md +++ b/docs/sources/shared/reference/components/extract-field-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/extract-field-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/extract-field-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/extract-field-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/extract-field-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/extract-field-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/extract-field-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/extract-field-block/ description: Shared content, extract field block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/field-filter-block.md b/docs/sources/shared/reference/components/field-filter-block.md similarity index 58% rename from docs/sources/shared/flow/reference/components/field-filter-block.md rename to docs/sources/shared/reference/components/field-filter-block.md index 266af75980..f9d862a871 100644 --- a/docs/sources/shared/flow/reference/components/field-filter-block.md +++ b/docs/sources/shared/reference/components/field-filter-block.md @@ -1,12 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/filter-field-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/filter-field-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/field-filter-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/filter-field-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/filter-field-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/field-filter-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/filter-field-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/filter-field-block/ description: Shared content, filter field block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/http-client-config-block.md b/docs/sources/shared/reference/components/http-client-config-block.md similarity index 67% rename from docs/sources/shared/flow/reference/components/http-client-config-block.md rename to docs/sources/shared/reference/components/http-client-config-block.md index a115d031b2..8a41b288d1 100644 --- a/docs/sources/shared/flow/reference/components/http-client-config-block.md +++ b/docs/sources/shared/reference/components/http-client-config-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/http-client-config-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/http-client-config-block/ -- 
/docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/http-client-config-block/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/http-client-config-block/
-- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/http-client-config-block/
-canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/http-client-config-block/
+canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/http-client-config-block/
 description: Shared content, http client config block
 headless: true
 ---
@@ -23,4 +17,4 @@ Name | Type | Description
 
 `bearer_token`, `bearer_token_file`, `basic_auth`, `authorization`, and `oauth2` are mutually exclusive, and only one can be provided inside of a `http_client_config` block.
 
-{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}}
\ No newline at end of file
+{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}}
diff --git a/docs/sources/shared/reference/components/http-client-proxy-config-description.md b/docs/sources/shared/reference/components/http-client-proxy-config-description.md
new file mode 100644
index 0000000000..5da832a3a6
--- /dev/null
+++ b/docs/sources/shared/reference/components/http-client-proxy-config-description.md
@@ -0,0 +1,14 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/http-client-proxy-config-description/
+description: Shared content, http client proxy config description
+headless: true
+---
+
+`no_proxy` can contain IPs, CIDR notations, and domain names. IPs and domain names can contain port numbers.
+`proxy_url` must be configured if `no_proxy` is configured.
+
+`proxy_from_environment` uses the environment variables `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` (or the lowercase versions thereof).
+Requests use the proxy from the environment variable matching their scheme, unless excluded by `NO_PROXY`.
+`proxy_url` and `no_proxy` must not be configured if `proxy_from_environment` is configured.
+
+`proxy_connect_header` should only be configured if `proxy_url` or `proxy_from_environment` is configured.
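To make the proxy rules above concrete, here's an illustrative sketch (not part of this change) of an endpoint that routes requests through a proxy while exempting internal destinations; the URLs and address ranges are placeholder assumptions:

```river
prometheus.remote_write "default" {
  endpoint {
    url = "https://prometheus.example.com/api/v1/write"

    // `no_proxy` is only valid because `proxy_url` is also set.
    proxy_url = "http://proxy.internal:3128"
    no_proxy  = "127.0.0.1,10.0.0.0/8,.internal.example.com"
  }
}
```

To defer to the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables instead, set `proxy_from_environment = true` and omit `proxy_url` and `no_proxy` entirely.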
diff --git a/docs/sources/shared/flow/reference/components/local-file-arguments-text.md b/docs/sources/shared/reference/components/local-file-arguments-text.md similarity index 64% rename from docs/sources/shared/flow/reference/components/local-file-arguments-text.md rename to docs/sources/shared/reference/components/local-file-arguments-text.md index 4b83c2291c..ccae890132 100644 --- a/docs/sources/shared/flow/reference/components/local-file-arguments-text.md +++ b/docs/sources/shared/reference/components/local-file-arguments-text.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/local-file-arguments-text/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/local-file-arguments-text/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/local-file-arguments-text/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/local-file-arguments-text/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/local-file-arguments-text/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/local-file-arguments-text/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/local-file-arguments-text/ description: Shared content, local file arguments text headless: true --- diff --git a/docs/sources/shared/flow/reference/components/loki-server-grpc.md b/docs/sources/shared/reference/components/loki-server-grpc.md similarity index 80% rename from docs/sources/shared/flow/reference/components/loki-server-grpc.md rename to docs/sources/shared/reference/components/loki-server-grpc.md index ffb0081ec3..9cfca25b99 100644 --- a/docs/sources/shared/flow/reference/components/loki-server-grpc.md +++ b/docs/sources/shared/reference/components/loki-server-grpc.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/loki-server-grpc/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/loki-server-grpc/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/loki-server-grpc/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/loki-server-grpc/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/loki-server-grpc/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/loki-server-grpc/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/loki-server-grpc/ description: Shared content, loki server grpc headless: true --- diff --git a/docs/sources/shared/flow/reference/components/loki-server-http.md b/docs/sources/shared/reference/components/loki-server-http.md similarity index 73% rename from docs/sources/shared/flow/reference/components/loki-server-http.md rename to docs/sources/shared/reference/components/loki-server-http.md index a418dbd892..e0510dcd70 100644 --- a/docs/sources/shared/flow/reference/components/loki-server-http.md +++ b/docs/sources/shared/reference/components/loki-server-http.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/loki-server-http/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/loki-server-http/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/loki-server-http/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/loki-server-http/ -- 
/docs/grafana-cloud/send-data/agent/shared/flow/reference/components/loki-server-http/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/loki-server-http/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/loki-server-http/ description: Shared content, loki server http headless: true --- diff --git a/docs/sources/shared/flow/reference/components/managed_identity-block.md b/docs/sources/shared/reference/components/managed_identity-block.md similarity index 55% rename from docs/sources/shared/flow/reference/components/managed_identity-block.md rename to docs/sources/shared/reference/components/managed_identity-block.md index 2e51a03050..fe255aa3de 100644 --- a/docs/sources/shared/flow/reference/components/managed_identity-block.md +++ b/docs/sources/shared/reference/components/managed_identity-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/managed_identity-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/managed_identity-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/managed_identity-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/managed_identity-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/managed_identity-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/managed_identity-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/managed_identity-block/ description: Shared content, managed_identity block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/match-properties-block.md b/docs/sources/shared/reference/components/match-properties-block.md similarity index 71% rename from docs/sources/shared/flow/reference/components/match-properties-block.md rename to docs/sources/shared/reference/components/match-properties-block.md index 863f4e406b..0b16d7042e 100644 --- a/docs/sources/shared/flow/reference/components/match-properties-block.md +++ b/docs/sources/shared/reference/components/match-properties-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/match-properties-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/match-properties-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/match-properties-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/match-properties-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/match-properties-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/match-properties-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/match-properties-block/ description: Shared content, match properties block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/oauth2-block.md b/docs/sources/shared/reference/components/oauth2-block.md similarity index 72% rename from docs/sources/shared/flow/reference/components/oauth2-block.md rename to docs/sources/shared/reference/components/oauth2-block.md index bba91c84a7..75515bbecf 100644 --- a/docs/sources/shared/flow/reference/components/oauth2-block.md +++ b/docs/sources/shared/reference/components/oauth2-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/oauth2-block/ -- 
/docs/grafana-cloud/agent/shared/flow/reference/components/oauth2-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/oauth2-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/oauth2-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/oauth2-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/oauth2-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/oauth2-block/ description: Shared content, oauth2 block headless: true --- @@ -27,4 +21,4 @@ Name | Type | Description The `oauth2` block may also contain a separate `tls_config` sub-block. -{{< docs/shared lookup="flow/reference/components/http-client-proxy-config-description.md" source="agent" version="" >}} \ No newline at end of file +{{< docs/shared lookup="reference/components/http-client-proxy-config-description.md" source="alloy" version="" >}} diff --git a/docs/sources/shared/reference/components/otelcol-compression-field.md b/docs/sources/shared/reference/components/otelcol-compression-field.md new file mode 100644 index 0000000000..2ae80b4387 --- /dev/null +++ b/docs/sources/shared/reference/components/otelcol-compression-field.md @@ -0,0 +1,16 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/otelcol-compression-field/ +description: Shared content, otelcol compression field +headless: true +--- + +By default, requests are compressed with gzip. +The `compression` argument controls which compression mechanism to use. Supported strings are: + +* `"gzip"` +* `"zlib"` +* `"deflate"` +* `"snappy"` +* `"zstd"` + +If `compression` is set to `"none"` or an empty string `""`, no compression is used. 
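As an illustration (not part of this change), a sketch of an OTLP exporter client that overrides the default compression; the component wiring and endpoint are placeholder assumptions:

```river
otelcol.exporter.otlp "default" {
  client {
    endpoint = "tempo.example.com:4317"

    // zstd trades a little CPU for better ratios than the gzip default.
    // Setting this to "none" or "" disables compression entirely.
    compression = "zstd"
  }
}
```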
diff --git a/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md b/docs/sources/shared/reference/components/otelcol-debug-metrics-block.md similarity index 59% rename from docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md rename to docs/sources/shared/reference/components/otelcol-debug-metrics-block.md index 2997d8c140..704c6e2776 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md +++ b/docs/sources/shared/reference/components/otelcol-debug-metrics-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-debug-metrics-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-debug-metrics-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-debug-metrics-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-debug-metrics-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-debug-metrics-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/otelcol-debug-metrics-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/otelcol-debug-metrics-block/ description: Shared content, otelcol debug metrics block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-filter-attribute-block.md b/docs/sources/shared/reference/components/otelcol-filter-attribute-block.md similarity index 59% rename from docs/sources/shared/flow/reference/components/otelcol-filter-attribute-block.md rename to docs/sources/shared/reference/components/otelcol-filter-attribute-block.md index b4226ada23..c939fe0188 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-filter-attribute-block.md +++ b/docs/sources/shared/reference/components/otelcol-filter-attribute-block.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-filter-attribute-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-filter-attribute-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-filter-attribute-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-filter-attribute-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-filter-attribute-block/ description: Shared content, otelcol filter attribute block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-filter-library-block.md b/docs/sources/shared/reference/components/otelcol-filter-library-block.md similarity index 59% rename from docs/sources/shared/flow/reference/components/otelcol-filter-library-block.md rename to docs/sources/shared/reference/components/otelcol-filter-library-block.md index b2d4b5ddac..2467e466b1 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-filter-library-block.md +++ b/docs/sources/shared/reference/components/otelcol-filter-library-block.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-filter-library-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-filter-library-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-filter-library-block/ -- 
/docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-filter-library-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-filter-library-block/ description: Shared content, otelcol filter library block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-filter-log-severity-block.md b/docs/sources/shared/reference/components/otelcol-filter-log-severity-block.md similarity index 72% rename from docs/sources/shared/flow/reference/components/otelcol-filter-log-severity-block.md rename to docs/sources/shared/reference/components/otelcol-filter-log-severity-block.md index 35633a4d6b..c0a5b02daf 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-filter-log-severity-block.md +++ b/docs/sources/shared/reference/components/otelcol-filter-log-severity-block.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-filter-log-severity-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-filter-log-severity-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-filter-log-severity-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-filter-log-severity-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-filter-log-severity-block/ description: Shared content, otelcol filter log severity block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-filter-regexp-block.md b/docs/sources/shared/reference/components/otelcol-filter-regexp-block.md similarity index 64% rename from docs/sources/shared/flow/reference/components/otelcol-filter-regexp-block.md rename to docs/sources/shared/reference/components/otelcol-filter-regexp-block.md index d265dffc60..706de5f28d 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-filter-regexp-block.md +++ b/docs/sources/shared/reference/components/otelcol-filter-regexp-block.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-filter-regexp-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-filter-regexp-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-filter-regexp-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-filter-regexp-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-filter-regexp-block/ description: Shared content, otelcol filter regexp block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-filter-resource-block.md b/docs/sources/shared/reference/components/otelcol-filter-resource-block.md similarity index 58% rename from docs/sources/shared/flow/reference/components/otelcol-filter-resource-block.md rename to docs/sources/shared/reference/components/otelcol-filter-resource-block.md index 446bdac82a..ec15864e79 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-filter-resource-block.md +++ b/docs/sources/shared/reference/components/otelcol-filter-resource-block.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-filter-resource-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-filter-resource-block/ -- 
/docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-filter-resource-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-filter-resource-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-filter-resource-block/ description: Shared content, otelcol filter resource block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-grpc-authority.md b/docs/sources/shared/reference/components/otelcol-grpc-authority.md similarity index 59% rename from docs/sources/shared/flow/reference/components/otelcol-grpc-authority.md rename to docs/sources/shared/reference/components/otelcol-grpc-authority.md index 15642a0b91..a3905820ad 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-grpc-authority.md +++ b/docs/sources/shared/reference/components/otelcol-grpc-authority.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-grpc-authority/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-grpc-authority/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-grpc-authority/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-grpc-authority/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-grpc-authority/ description: Shared content, otelcol grpc authority headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-grpc-balancer-name.md b/docs/sources/shared/reference/components/otelcol-grpc-balancer-name.md similarity index 60% rename from docs/sources/shared/flow/reference/components/otelcol-grpc-balancer-name.md rename to docs/sources/shared/reference/components/otelcol-grpc-balancer-name.md index 4c30602ad4..bcff954555 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-grpc-balancer-name.md +++ b/docs/sources/shared/reference/components/otelcol-grpc-balancer-name.md @@ -1,10 +1,4 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-grpc-balancer-name/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-grpc-balancer-name/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-grpc-balancer-name/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-grpc-balancer-name/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-grpc-balancer-name/ description: Shared content, otelcol grpc balancer name headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-queue-block.md b/docs/sources/shared/reference/components/otelcol-queue-block.md similarity index 72% rename from docs/sources/shared/flow/reference/components/otelcol-queue-block.md rename to docs/sources/shared/reference/components/otelcol-queue-block.md index 09bbf8205a..a7fbde5804 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-queue-block.md +++ b/docs/sources/shared/reference/components/otelcol-queue-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-queue-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-queue-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-queue-block/ -- 
/docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-queue-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-queue-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/otelcol-queue-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/otelcol-queue-block/ description: Shared content, otelcol queue block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-retry-block.md b/docs/sources/shared/reference/components/otelcol-retry-block.md similarity index 77% rename from docs/sources/shared/flow/reference/components/otelcol-retry-block.md rename to docs/sources/shared/reference/components/otelcol-retry-block.md index 546947f12c..95900714c3 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-retry-block.md +++ b/docs/sources/shared/reference/components/otelcol-retry-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-retry-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-retry-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-retry-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-retry-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-retry-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/otelcol-retry-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/otelcol-retry-block/ description: Shared content, otelcol retry block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/otelcol-tls-config-block.md b/docs/sources/shared/reference/components/otelcol-tls-config-block.md similarity index 77% rename from docs/sources/shared/flow/reference/components/otelcol-tls-config-block.md rename to docs/sources/shared/reference/components/otelcol-tls-config-block.md index caf4d45001..a1d0086043 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-tls-config-block.md +++ b/docs/sources/shared/reference/components/otelcol-tls-config-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/otelcol-tls-config-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/otelcol-tls-config-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/otelcol-tls-config-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/otelcol-tls-config-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/otelcol-tls-config-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/otelcol-tls-config-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/otelcol-tls-config-block/ description: Shared content, otelcol tls config block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/output-block-logs.md b/docs/sources/shared/reference/components/output-block-logs.md similarity index 55% rename from docs/sources/shared/flow/reference/components/output-block-logs.md rename to docs/sources/shared/reference/components/output-block-logs.md index fbdc891362..f340cc42dc 100644 --- a/docs/sources/shared/flow/reference/components/output-block-logs.md +++ 
b/docs/sources/shared/reference/components/output-block-logs.md @@ -1,11 +1,5 @@ --- -aliases: -- ../../otelcol/output-block-logs/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/output-block-logs/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/output-block-logs/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/output-block-logs/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/output-block-logs/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/output-block-logs/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/output-block-logs/ description: Shared content, output block logs headless: true --- diff --git a/docs/sources/shared/flow/reference/components/output-block-metrics.md b/docs/sources/shared/reference/components/output-block-metrics.md similarity index 56% rename from docs/sources/shared/flow/reference/components/output-block-metrics.md rename to docs/sources/shared/reference/components/output-block-metrics.md index 25818897ca..5b0d883427 100644 --- a/docs/sources/shared/flow/reference/components/output-block-metrics.md +++ b/docs/sources/shared/reference/components/output-block-metrics.md @@ -1,10 +1,4 @@ --- -aliases: -- ../../otelcol/output-block-metrics/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/output-block-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/output-block-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/output-block-metrics/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/output-block-metrics/ description: Shared content, output block metrics headless: true --- diff --git a/docs/sources/shared/flow/reference/components/output-block-traces.md b/docs/sources/shared/reference/components/output-block-traces.md similarity index 55% rename from docs/sources/shared/flow/reference/components/output-block-traces.md rename to docs/sources/shared/reference/components/output-block-traces.md index 124f42115d..bb39d85a07 100644 --- a/docs/sources/shared/flow/reference/components/output-block-traces.md +++ b/docs/sources/shared/reference/components/output-block-traces.md @@ -1,11 +1,5 @@ --- -aliases: -- ../../otelcol/output-block-traces/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/output-block-traces/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/output-block-traces/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/output-block-traces/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/output-block-traces/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/output-block-traces/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/output-block-traces/ description: Shared content, output block traces headless: true --- diff --git a/docs/sources/shared/flow/reference/components/output-block.md b/docs/sources/shared/reference/components/output-block.md similarity index 63% rename from docs/sources/shared/flow/reference/components/output-block.md rename to docs/sources/shared/reference/components/output-block.md index c3ad30e782..4d0d196a09 100644 --- a/docs/sources/shared/flow/reference/components/output-block.md +++ b/docs/sources/shared/reference/components/output-block.md 
@@ -1,11 +1,5 @@ --- -aliases: -- ../../otelcol/output-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/output-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/output-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/output-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/output-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/output-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/output-block/ description: Shared content, output block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/prom-operator-scrape.md b/docs/sources/shared/reference/components/prom-operator-scrape.md similarity index 68% rename from docs/sources/shared/flow/reference/components/prom-operator-scrape.md rename to docs/sources/shared/reference/components/prom-operator-scrape.md index 156198d17b..03f939b061 100644 --- a/docs/sources/shared/flow/reference/components/prom-operator-scrape.md +++ b/docs/sources/shared/reference/components/prom-operator-scrape.md @@ -1,9 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/prom-operator-scrape/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/prom-operator-scrape/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/prom-operator-scrape/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/prom-operator-scrape/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/prom-operator-scrape/ description: Shared content, prom operator scrape headless: true --- diff --git a/docs/sources/shared/flow/reference/components/rule-block-logs.md b/docs/sources/shared/reference/components/rule-block-logs.md similarity index 87% rename from docs/sources/shared/flow/reference/components/rule-block-logs.md rename to docs/sources/shared/reference/components/rule-block-logs.md index 3db6449ed1..6fdc772d61 100644 --- a/docs/sources/shared/flow/reference/components/rule-block-logs.md +++ b/docs/sources/shared/reference/components/rule-block-logs.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/rule-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/rule-block-logs/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/rule-block-logs/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/rule-block-logs/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/rule-block-logs/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/rule-block-logs/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/rule-block-logs/ description: Shared content, rule block logs headless: true --- diff --git a/docs/sources/shared/flow/reference/components/rule-block.md b/docs/sources/shared/reference/components/rule-block.md similarity index 87% rename from docs/sources/shared/flow/reference/components/rule-block.md rename to docs/sources/shared/reference/components/rule-block.md index 614b062b0e..e59d5047d4 100644 --- a/docs/sources/shared/flow/reference/components/rule-block.md +++ b/docs/sources/shared/reference/components/rule-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/rule-block/ -- 
/docs/grafana-cloud/agent/shared/flow/reference/components/rule-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/rule-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/rule-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/rule-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/rule-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/rule-block/ description: Shared content, rule block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/sigv4-block.md b/docs/sources/shared/reference/components/sigv4-block.md similarity index 65% rename from docs/sources/shared/flow/reference/components/sigv4-block.md rename to docs/sources/shared/reference/components/sigv4-block.md index 54598570ad..7c14c3d614 100644 --- a/docs/sources/shared/flow/reference/components/sigv4-block.md +++ b/docs/sources/shared/reference/components/sigv4-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/sigv4-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/sigv4-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/sigv4-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/sigv4-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/sigv4-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/sigv4-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/sigv4-block/ description: Shared content, sigv4 block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/tls-config-block.md b/docs/sources/shared/reference/components/tls-config-block.md similarity index 77% rename from docs/sources/shared/flow/reference/components/tls-config-block.md rename to docs/sources/shared/reference/components/tls-config-block.md index 1b92e91d0b..7c4b45145c 100644 --- a/docs/sources/shared/flow/reference/components/tls-config-block.md +++ b/docs/sources/shared/reference/components/tls-config-block.md @@ -1,11 +1,5 @@ --- -aliases: -- /docs/agent/shared/flow/reference/components/tls-config-block/ -- /docs/grafana-cloud/agent/shared/flow/reference/components/tls-config-block/ -- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/tls-config-block/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/tls-config-block/ -- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/tls-config-block/ -canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/tls-config-block/ +canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/tls-config-block/ description: Shared content, tls config block headless: true --- diff --git a/docs/sources/shared/flow/reference/components/write_relabel_config.md b/docs/sources/shared/reference/components/write_relabel_config.md similarity index 84% rename from docs/sources/shared/flow/reference/components/write_relabel_config.md rename to docs/sources/shared/reference/components/write_relabel_config.md index db06408464..dfa6084a7d 100644 --- a/docs/sources/shared/flow/reference/components/write_relabel_config.md +++ b/docs/sources/shared/reference/components/write_relabel_config.md @@ -1,11 +1,5 @@ --- -aliases: -- 
/docs/agent/shared/flow/reference/components/write-relabel-config-block/
-- /docs/grafana-cloud/agent/shared/flow/reference/components/write-relabel-config-block/
-- /docs/grafana-cloud/monitor-infrastructure/agent/shared/flow/reference/components/write-relabel-config-block/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/flow/reference/components/write-relabel-config-block/
-- /docs/grafana-cloud/send-data/agent/shared/flow/reference/components/write-relabel-config-block/
-canonical: https://grafana.com/docs/agent/latest/shared/flow/reference/components/write-relabel-config-block/
+canonical: https://grafana.com/docs/alloy/latest/shared/reference/components/write-relabel-config-block/
 description: Shared content, write_relabel_config block
 headless: true
 ---
@@ -13,10 +7,8 @@ headless: true
 
-The `write_relabel_config` block contains the definition of any relabeling
-rules that can be applied to an input metric.
-If more than one `write_relabel_config` block is defined, the transformations
-are applied in top-down order.
+The `write_relabel_config` block contains the definition of any relabeling rules that can be applied to an input metric.
+If more than one `write_relabel_config` block is defined, the transformations are applied in top-down order.
 
 The following arguments can be used to configure a `write_relabel_config`.
 All arguments are optional. Omitted fields take their default values.
diff --git a/docs/sources/shared/stability/beta.md b/docs/sources/shared/stability/beta.md
new file mode 100644
index 0000000000..0935e5b70d
--- /dev/null
+++ b/docs/sources/shared/stability/beta.md
@@ -0,0 +1,11 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/shared/stability/beta/
+description: Shared content, beta
+headless: true
+---
+
+> **BETA**: This is a [beta][] component. Beta components are subject to breaking
+> changes, and may be replaced with equivalent functionality that covers the
+> same use case.
+
+[beta]: ../../../stability/#beta
diff --git a/docs/sources/shared/stability/experimental.md b/docs/sources/shared/stability/experimental.md
new file mode 100644
index 0000000000..6028ba3cdd
--- /dev/null
+++ b/docs/sources/shared/stability/experimental.md
@@ -0,0 +1,11 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/shared/stability/experimental/
+description: Shared content, experimental
+headless: true
+---
+
+> **EXPERIMENTAL**: This is an [experimental][] component. Experimental
+> components are subject to frequent breaking changes, and may be removed with
+> no equivalent replacement.
+
+[experimental]: ../../../stability/#experimental
diff --git a/docs/sources/shared/wal-data-retention.md b/docs/sources/shared/wal-data-retention.md
deleted file mode 100644
index e7fa388718..0000000000
--- a/docs/sources/shared/wal-data-retention.md
+++ /dev/null
@@ -1,116 +0,0 @@
----
-aliases:
-- /docs/agent/shared/wal-data-retention/
-- /docs/grafana-cloud/agent/shared/wal-data-retention/
-- /docs/grafana-cloud/monitor-infrastructure/agent/shared/wal-data-retention/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/shared/wal-data-retention/
-- /docs/grafana-cloud/send-data/agent/shared/wal-data-retention/
-canonical: https://grafana.com/docs/agent/latest/shared/wal-data-retention/
-description: Shared content, information about data retention in the WAL
-headless: true
----
-
-The `prometheus.remote_write` component uses a Write Ahead Log (WAL) to prevent
-data loss during network outages.
The component buffers the received metrics in -a WAL for each configured endpoint. The queue shards can use the WAL after the -network outage is resolved and flush the buffered metrics to the endpoints. - -The WAL records metrics in 128 MB files called segments. To avoid having a WAL -that grows on-disk indefinitely, the component _truncates_ its segments on a -set interval. - -On each truncation, the WAL deletes references to series that are no longer -present and also _checkpoints_ roughly the oldest two thirds of the segments -(rounded down to the nearest integer) written to it since the last truncation -period. A checkpoint means that the WAL only keeps track of the unique -identifier for each existing metrics series, and can no longer use the samples -for remote writing. If that data has not yet been pushed to the remote -endpoint, it is lost. - -This behavior dictates the data retention for the `prometheus.remote_write` -component. It also means that it's impossible to directly correlate data -retention directly to the data age itself, as the truncation logic works on -_segments_, not the samples themselves. This makes data retention less -predictable when the component receives a non-consistent rate of data. - -The [WAL block][] in Flow mode, or the [metrics config][] in Static mode -contain some configurable parameters that can be used to control the tradeoff -between memory usage, disk usage, and data retention. - -The `truncate_frequency` or `wal_truncate_frequency` parameter configures the -interval at which truncations happen. A lower value leads to reduced memory -usage, but also provides less resiliency to long outages. - -When a WAL clean-up starts, the most recently successfully sent timestamp is -used to determine how much data is safe to remove from the WAL. -The `min_keepalive_time` or `min_wal_time` controls the minimum age of samples -considered for removal. No samples more recent than `min_keepalive_time` are -removed. The `max_keepalive_time` or `max_wal_time` controls the maximum age of -samples that can be kept in the WAL. Samples older than -`max_keepalive_time` are forcibly removed. - -### Extended `remote_write` outages -When the remote write endpoint is unreachable over a period of time, the most -recent successfully sent timestamp is not updated. The -`min_keepalive_time` and `max_keepalive_time` arguments control the age range -of data kept in the WAL. - -If the remote write outage is longer than the `max_keepalive_time` parameter, -then the WAL is truncated, and the oldest data is lost. - -### Intermittent `remote_write` outages -If the remote write endpoint is intermittently reachable, the most recent -successfully sent timestamp is updated whenever the connection is successful. -A successful connection updates the series' comparison with -`min_keepalive_time` and triggers a truncation on the next `truncate_frequency` -interval which checkpoints two thirds of the segments (rounded down to the -nearest integer) written since the previous truncation. - -### Falling behind -If the queue shards cannot flush data quickly enough to keep -up-to-date with the most recent data buffered in the WAL, we say that the -component is 'falling behind'. -It's not unusual for the component to temporarily fall behind 2 or 3 scrape intervals. -If the component falls behind more than one third of the data written since the -last truncate interval, it is possible for the truncate loop to checkpoint data -before being pushed to the remote_write endpoint. 
- -### WAL corruption - -WAL corruption can occur when Grafana Agent unexpectedly stops while the latest WAL segments -are still being written to disk. For example, the host computer has a general disk failure -and crashes before you can stop Grafana Agent and other running services. When you restart Grafana -Agent, it verifies the WAL, removing any corrupt segments it finds. Sometimes, this repair -is unsuccessful, and you must manually delete the corrupted WAL to continue. - -If the WAL becomes corrupted, Grafana Agent writes error messages such as -`err="failed to find segment for index"` to the log file. - -{{< admonition type="note" >}} -Deleting a WAL segment or a WAL file permanently deletes the stored WAL data. -{{< /admonition >}} - -To delete the corrupted WAL: - -1. [Stop][] Grafana Agent. -1. Find and delete the contents of the `wal` directory. - - By default the `wal` directory is a subdirectory - of the `data-agent` directory located in the Grafana Agent working directory. The WAL data directory - may be different than the default depending on the [wal_directory][] setting in your Static configuration - file or the path specified by the Flow [command line flag][run] `--storage-path`. - - {{< admonition type="note" >}} - There is one `wal` directory per: - - * Metrics instance running in Static mode - * `prometheus.remote_write` component running in Flow mode - {{< /admonition >}} - -1. [Start][Stop] Grafana Agent and verify that the WAL is working correctly. - -[WAL block]: /docs/agent//flow/reference/components/prometheus.remote_write#wal-block -[metrics config]: /docs/agent//static/configuration/metrics-config -[Stop]: /docs/agent//flow/get-started/start-agent -[wal_directory]: /docs/agent//static/configuration/metrics-config -[run]: /docs/agent//flow/reference/cli/run diff --git a/docs/sources/stability.md b/docs/sources/stability.md index c21d549aeb..2cdb1d0087 100644 --- a/docs/sources/stability.md +++ b/docs/sources/stability.md @@ -1,28 +1,22 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/stability/ -- /docs/grafana-cloud/send-data/agent/stability/ -canonical: https://grafana.com/docs/agent/latest/stability/ -description: Grafana Agent features fall into one of three stability categories, experimental, - beta, or stable +canonical: https://grafana.com/docs/alloy/latest/stability/ +description: Grafana Alloy features fall into one of three stability categories, experimental, beta, or stable title: Stability weight: 600 --- # Stability -Stability of functionality usually refers to the stability of a _use case,_ -such as collecting and forwarding OpenTelemetry metrics. +Stability of functionality usually refers to the stability of a _use case,_ such as collecting and forwarding OpenTelemetry metrics. -Features within the Grafana Agent project will fall into one of three stability -categories: +Features within the {{< param "PRODUCT_NAME" >}} project will fall into one of three stability categories: * **Experimental**: A new use case is being explored. * **Beta**: Functionality covering a use case is being matured. * **Stable**: Functionality covering a use case is believed to be stable. -The default stability is stable; features will be explicitly marked as -experimental or beta if they are not stable. +The default stability is stable. +Features are explicitly marked as experimental or beta if they aren't stable. ## Experimental @@ -37,22 +31,18 @@ Unless removed, experimental features eventually graduate to beta. 
 ## Beta
 
-The **beta** stability category is used to denote a feature which is being
-matured.
+The **beta** stability category is used to denote a feature that is being matured.
 
 * Beta features are subject to occasional breaking changes.
-* Beta features can be replaced by equivalent functionality that covers the
-  same use case.
+* Beta features can be replaced by equivalent functionality that covers the same use case.
 * Beta features can be used without enabling feature flags.
 
-Unless replaced with equivalent functionality, beta features eventually
-graduate to stable.
+Unless replaced with equivalent functionality, beta features eventually graduate to stable.
 
 ## Stable
 
 The **stable** stability category is used to denote a feature as stable.
 
 * Breaking changes to stable features are rare, and will be well-documented.
-* If new functionality is introduced to replace existing stable functionality,
-  deprecation and removal timeline will be well-documented.
+* If new functionality is introduced to replace existing stable functionality, the deprecation and removal timeline will be well-documented.
 * Stable features can be used without enabling feature flags.
diff --git a/docs/sources/static/_index.md b/docs/sources/static/_index.md
deleted file mode 100644
index 4ce1f42036..0000000000
--- a/docs/sources/static/_index.md
+++ /dev/null
@@ -1,97 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/
-- /docs/grafana-cloud/send-data/agent/static/
-canonical: https://grafana.com/docs/agent/latest/static/
-description: Learn about Grafana Agent in static mode
-title: Static mode
-weight: 200
----
-
-# Static mode
-
-Static mode is the original mode of Grafana Agent.
-Static mode is composed of different _subsystems_:
-
-* The _metrics subsystem_ wraps around Prometheus for collecting Prometheus
-  metrics and forwarding them over the Prometheus `remote_write` protocol.
-
-* The _logs subsystem_ wraps around Grafana Promtail for collecting logs and
-  forwarding them to Grafana Loki.
-
-* The _traces subsystem_ wraps around OpenTelemetry Collector for collecting
-  traces and forwarding them to Grafana Tempo or any OpenTelemetry-compatible
-  endpoint.
-
-Static mode is [configured][configure] with a YAML file.
-
-Static mode works with:
-
-- Grafana Cloud
-- Grafana Enterprise Stack
-- OSS deployments of Grafana Loki, Grafana Mimir, Grafana Tempo, and Prometheus
-
-This topic helps you to think about what you're trying to accomplish and how to
-use Grafana Agent to meet your goals.
-
-You can [set up][] and [configure][] Grafana Agent in static mode manually, or
-you can follow the common workflows described in this topic.
-
-## Topics
-
-### Static mode Grafana Agent for Grafana Cloud integrations
-
-There are different ways for you to set up Grafana Agent to scrape
-data—through Grafana's integration platform or directly. Select a guide
-to get started:
-
-| Topic | Description |
-|---|---|
-| [Get started with monitoring using an integration](/docs/grafana-cloud/data-configuration/get-started-integration/) | Walk through installing a Linux integration using Grafana Agent in the Grafana Cloud interface. |
-| [Install and manage integrations](/docs/grafana-cloud/data-configuration/integrations/install-and-manage-integrations/) | View general steps for using Grafana Cloud integrations to install Grafana Agent to collect data. See [supported integrations](/docs/grafana-cloud/data-configuration/integrations/integration-reference/). |
-| [Ship your metrics to Grafana Cloud without an integration](/docs/grafana-cloud/data-configuration/metrics/agent-config-exporter/) | If you want to ship your Prometheus metrics to Grafana Cloud but there isn’t an integration available, you can use a Prometheus exporter and deploy Grafana Agent to scrape your local machine or service. | -| [Change your metrics scrape interval](/docs/grafana-cloud/billing-and-usage/control-prometheus-metrics-usage/changing-scrape-interval/) | Learn about reducing your total data points per minute (DPM) by adjusting your scrape interval. | - -### Static mode Grafana Agent for Kubernetes Monitoring - -Grafana Kubernetes Monitoring provides a simplified approach to monitoring your Kubernetes fleet by deploying Grafana Agent with useful defaults for collecting metrics. Select a guide to get started monitoring Kubernetes: - -| Topic | Description | -|---|---| -| [Configure Kubernetes Monitoring using Agent](/docs/grafana-cloud/kubernetes-monitoring/configuration/) | Use the Kubernetes Monitoring solution to set up monitoring of your Kubernetes cluster and to install preconfigured dashboards and alerts. | -| [Ship Kubernetes traces using Grafana Agent directly](/docs/grafana-cloud/kubernetes-monitoring/other-methods/k8s-agent-traces/) | Deploy Grafana Agent into your Kubernetes cluster as a deployment and configure it to collect traces for your Kubernetes workloads. | - -### Use Grafana Agent directly to scrape telemetry data - -Grafana Cloud integration workflows and the Kubernetes Monitoring solution are the easiest ways to get started collecting telemetry data, but sometimes you might want to use a manual approach to set your configuration options. - -| Topic | Description | -|---|---| -| [Install or uninstall Grafana Agent][install] | Install or uninstall Grafana Agent. | -| [Troubleshoot Cloud Integrations installation on Linux](/docs/grafana-cloud/monitor-infrastructure/integrations/install-troubleshoot-linux/) | Troubleshoot common errors when executing the Grafana Agent installation script on Linux. | -| [Troubleshoot Cloud Integrations installation on Mac](/docs/grafana-cloud/monitor-infrastructure/integrations/install-troubleshoot-mac/) | Troubleshoot common errors when executing the Grafana Agent installation script on Mac. | -| [Troubleshoot Cloud Integrations installation on Windows](/docs/grafana-cloud/monitor-infrastructure/integrations/install-troubleshooting-windows/) | Troubleshoot common errors when executing the Grafana Agent installation script on Windows. | - -### Use Grafana Agent to send logs to Grafana Loki - -Logs are included when you [set up a Cloud integration](/docs/grafana-cloud/data-configuration/integrations/install-and-manage-integrations) but you can take a more hands-on approach with the following guide. - -| Topic | Description | -|---|---| -| [Collect logs with Grafana Agent](/docs/grafana-cloud/data-configuration/logs/collect-logs-with-agent/) | Install Grafana Agent to collect logs for use with Grafana Loki, included with your [Grafana Cloud account](/docs/grafana-cloud/account-management/cloud-portal/). | - -### Use Grafana Agent to send traces to Grafana Tempo - -| Topic | Description | -|---|---| -| [Set up and use tracing](/docs/grafana-cloud/data-configuration/traces/set-up-and-use-tempo/) | Install Grafana Agent to collect traces for use with Grafana Tempo, included with your [Grafana Cloud account](/docs/grafana-cloud/account-management/cloud-portal/). 
| -| [Use Grafana Agent as a tracing pipeline](/docs/tempo/latest/configuration/grafana-agent/) | Grafana Agent can be configured to run a set of tracing pipelines to collect data from your applications and write it to Grafana Tempo. Pipelines are built using OpenTelemetry, and consist of receivers, processors, and exporters. | - -{{% docs/reference %}} -[set up]: "/docs/agent/ -> /docs/agent//static/set-up" -[set up]: "/docs/grafana-cloud/ -> ./set-up" -[configure]: "/docs/agent/ -> /docs/agent//static/configuration" -[configure]: "/docs/grafana-cloud/ -> ./configuration" -[install]: "/docs/agent/ -> /docs/agent//static/set-up/install" -[install]: "/docs/grafana-cloud/ -> ./set-up/install" -{{% /docs/reference %}} diff --git a/docs/sources/static/api/_index.md b/docs/sources/static/api/_index.md deleted file mode 100644 index 1f6715e9d7..0000000000 --- a/docs/sources/static/api/_index.md +++ /dev/null @@ -1,539 +0,0 @@ ---- -aliases: -- ../api/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/api/ -- /docs/grafana-cloud/send-data/agent/static/api/ -canonical: https://grafana.com/docs/agent/latest/static/api/ -description: Learn about the Grafana Agent static mode API -menuTitle: Static mode API -title: Static mode APIs (Stable) -weight: 400 ---- - -# Static mode APIs (Stable) - -The API for static mode is divided into several parts: - -- [Config Management API](#config-management-api-beta) -- [Agent API](#agent-api) -- [Integrations API](#integrations-api-experimental) -- [Ready/Healthy API](#ready--health-api) - -API endpoints are stable unless otherwise noted. - -## Config management API (Beta) - -Grafana Agent exposes a configuration management REST API for managing instance configurations when it's running in [scraping service mode][scrape]. - -{{< admonition type="note" >}} -The scraping service mode is a requirement for the configuration management -API, however this isn't a prerequisite for the Agent API or Ready/Healthy API. -{{< /admonition >}} - -The following endpoints are exposed: - -- List configs: [`GET /agent/api/v1/configs`](#list-configs) -- Get config: [`GET /agent/api/v1/configs/{name}`](#get-config) -- Update config: [`PUT /agent/api/v1/config/{name}`](#update-config) -- Delete config: [`DELETE /agent/api/v1/config/{name}`](#delete-config) - -{{< admonition type="note" >}} -If you are running Grafana Agent in a Docker container and you want to expose the API outside the Docker container, you must change the default HTTP listen address from `127.0.0.1:12345` to a valid network interface address. -You can change the HTTP listen address with the command-line flag: `-server.http.address=0.0.0.0:12345`. -For more information, refer to the [Server](https://grafana.com/docs/agent/latest/static/configuration/flags/#server) command-line flag documentation. - -You must also publish the port in Docker. Refer to [Published ports](https://docs.docker.com/network/#published-ports) in the Docker documentation for more information. -{{< /admonition >}} - -### API response - -All Config Management API endpoints will return responses in the following -form, unless an internal service error prevents the server from responding -properly: - -``` -{ - "status": "success" | "error", - "data": {} -} -``` - -Status will be either `success` or `error`. All 2xx responses will be -accompanied by a `success` value for the status field. 4xx and 5xx -responses will provide a value of `error`. All requests may potentially -return 500 on an internal error. 
Other non-500 responses will be documented -per API. - -The data field may or may not be present, depending on the endpoint. It -provides extra information for the query. The documentation for each endpoint -will describe the full response provided. - -### List configs - -``` -GET /agent/api/v1/configs -``` - -List configs returns a list of the named configurations currently known by the -underlying KV store. - -Status code: 200 on success. -Response: - -``` -{ - "status": "success", - "data": { - "configs": [ - // list of config names: - "a", - "b", - "c", - // ... - ] - } -} -``` - -### Get config - -``` -GET /agent/api/v1/configs/{name} -``` - -Get config returns a single configuration by name. The configuration must -exist or an error will be returned. URL-encoded names will be retrieved in decoded -form. e.g., `hello%2Fworld` will represent the config named `hello/world`. - -Status code: 200 on success, 400 on invalid config name. -Response on success: - -``` -{ - "status": "success", - "data": { - "value": "/* YAML configuration */" - } -} -``` - -### Update config - -``` -PUT /agent/api/v1/config/{name} -POST /agent/api/v1/config/{name} -``` - -Update config updates or adds a new configuration by name. If a configuration -with the same name already exists, then it will be completely overwritten. - -URL-encoded names are stored in decoded form. e.g., `hello%2Fworld` will -represent the config named `hello/world`. - -The request body passed to this endpoint must match the format of [metrics_instance_config][metrics] -defined in the Configuration Reference. The name field of the configuration is -ignored and the name in the URL takes precedence. The request body must be -formatted as YAML. - -{{< admonition type="warning" >}} -By default, all instance configuration files that read -credentials from a file on disk will be rejected. This prevents malicious users -from reading the contents of arbitrary files as passwords and sending their -contents to fake remote_write endpoints. To change the behavior, set -`dangerous_allow_reading_files` to true in the `scraping_service` block. -{{< /admonition >}} - -Status code: 201 with a new config, 200 on updated config. -Response on success: - -``` -{ - "status": "success" -} -``` - -### Delete config - -``` -DELETE /agent/api/v1/config/{name} -``` - -Delete config attempts to delete a configuration by name. The named -configuration must exist; deleting a nonexistent config will result in an -error. - -URL-encoded names will be interpreted in decoded form. e.g., `hello%2Fworld` -will represent the config named `hello/world`. - -Status code: 200 on success, 400 with invalid config name. -Response on success: - -``` -{ - "status": "success" -} -``` - -## Agent API - -### List current running instances of metrics subsystem - -``` -GET /agent/api/v1/metrics/instances -``` - -{{< admonition type="note" >}} -The deprecated alias is `/agent/api/v1/instances` -{{< /admonition >}} - -Status code: 200 on success. -Response on success: - -``` -{ - "status": "success", - "data": [ - - ] -} -``` - -### List current scrape targets of metrics subsystem - -``` -GET /agent/api/v1/metrics/targets -``` - -{{< admonition type="note" >}} -The deprecated alias is `/agent/api/v1/targets` -{{< /admonition >}} - -This endpoint collects all metrics subsystem targets known to the Agent across all -running instances. Only targets being scraped from the local Agent will be returned. 
If running in scraping service mode, this endpoint must be invoked in all Agents separately to get the combined set of targets across the whole Agent cluster.

The `labels` field shows the labels that will be added to metrics from the target, while the `discovered_labels` field shows all labels found during service discovery.

Status code: 200 on success.
Response on success:

```
{
  "status": "success",
  "data": [
    {
      "instance": <string>,
      "target_group": <string>,
      "endpoint": <string>,
      "state": <string>,
      "discovered_labels": {
        "__address__": "<address>",
        ...
      },
      "labels": {
        "label_a": "value_a",
        ...
      },
      "last_scrape": <string>,
      "scrape_duration_ms": <number>,
      "scrape_error": <string>
    },
    ...
  ]
}
```
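As a quick sanity check, you can query this endpoint directly. A minimal sketch, assuming the default HTTP listen address of `127.0.0.1:12345`:

```bash
# List every metrics target known to this Agent, with discovered and final labels.
curl http://127.0.0.1:12345/agent/api/v1/metrics/targets
```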
", - ... - }, - "labels": { - "label_a": "value_a", - ... - }, - "last_scrape": , - "scrape_duration_ms": , - "scrape_error": - }, - ... - ] -} -``` - -### Accept remote_write requests - -``` -POST /agent/api/v1/metrics/instance/{instance}/write -``` - -This endpoint accepts Prometheus-compatible remote_write POST requests, and -appends their contents into an instance's WAL. - -Replace `{instance}` with the name of the metrics instance from your config -file. For example, this block defines the "dev" and "prod" instances: - -```yaml -metrics: - configs: - - name: dev # /agent/api/v1/metrics/instance/dev/write - ... - - name: prod # /agent/api/v1/metrics/instance/prod/write - ... -``` - -Status code: 204 on success, 400 for bad requests related to the provided -instance or POST payload format and content, 500 for cases where appending -to the WAL failed. - -### List current running instances of logs subsystem - -``` -GET /agent/api/v1/logs/instances -``` - -Status code: 200 on success. -Response on success: - -``` -{ - "status": "success", - "data": [ - - ] -} -``` - -### List current scrape targets of logs subsystem - -``` -GET /agent/api/v1/logs/targets -``` - -This endpoint collects all logs subsystem targets known to the Agent across -all running instances. Only targets being scraped from Promtail will be returned. - -The `labels` fields shows the labels that will be added to metrics from the -target, while the `discovered_labels` field shows all labels found during -service discovery. - -Status code: 200 on success. -Response on success: - -``` -{ - "status": "success", - "data": [ - { - "instance": "default", - "target_group": "varlogs", - "type": "File", - "labels": { - "job": "varlogs" - }, - "discovered_labels": { - "__address__": "localhost", - "__path__": "/var/log/*log", - "job": "varlogs" - }, - "ready": true, - "details": { - "/var/log/alternatives.log": 13386, - "/var/log/apport.log": 0, - "/var/log/auth.log": 37009, - "/var/log/bootstrap.log": 107347, - "/var/log/dpkg.log": 374420, - "/var/log/faillog": 0, - "/var/log/fontconfig.log": 11629, - "/var/log/gpu-manager.log": 1541, - "/var/log/kern.log": 782582, - "/var/log/lastlog": 0, - "/var/log/syslog": 788450 - } - } - ] -} -``` - -### Reload configuration file (beta) - -This endpoint is currently in beta and may have issues. Please open any issues -you encounter. - -``` -GET /-/reload -POST /-/reload -``` - -This endpoint will re-read the configuration file from disk and refresh the -entire state of the Agent to reflect the new file on disk: - -- HTTP Server -- Prometheus metrics subsystem -- Loki logs subsystem -- Tempo traces subsystem -- Integrations - -Valid configurations will be applied to each of the subsystems listed above, and -`/-/reload` will return with a status code of 200 once all subsystems have been -updated. Malformed configuration files (invalid YAML, failed validation checks) -will be immediately rejected with a status code of 400. - -Well-formed configuration files can still be invalid for various reasons, such -as not having permissions to read the WAL directory. Issues such as these will -cause per-subsystem problems while reloading the configuration, and will leave -that subsystem in an undefined state. Specific errors encountered during reload -will be logged, and should be fixed before calling `/-/reload` again. - -Status code: 200 on success, 400 otherwise. - -### Show configuration file - -``` -GET /-/config -``` - -This endpoint prints out the currently loaded configuration the Agent is using. 
### Show configuration file

```
GET /-/config
```

This endpoint prints out the currently loaded configuration the Agent is using.
The returned YAML has defaults applied, and only shows changes to the state that validated successfully, so the results will not identically match the configuration file on disk.

Status code: 200 on success.

### Generate support bundle

```
GET /-/support?duration=N
```

This endpoint returns a 'support bundle', a zip file that contains information about a running agent, and can be used as a baseline of information when trying to debug an issue.

The duration parameter is optional, must be less than or equal to the configured HTTP server write timeout, and if not provided, defaults to it. The endpoint is only exposed on the agent's HTTP server listen address, which defaults to `localhost:12345`.

The support bundle contains all information in plain text, so that it can be inspected before sharing, to verify that no sensitive information has leaked.

In addition, you can inspect the [supportbundle package](https://github.com/grafana/agent/tree/main/internal/static/supportbundle) to verify the code that is being used to generate these bundles.

A support bundle contains the following data:
* `agent-config.yaml` contains the current agent configuration (when the `-config.enable-read-api` flag is passed).
* `agent-logs.txt` contains the agent logs during the bundle generation.
* `agent-metadata.yaml` contains the agent's build version, operating system, architecture, uptime, plus a string payload defining which extra agent features have been enabled via command-line flags.
* `agent-metrics-instances.json` and `agent-metrics-targets.json` contain the active metric subsystem instances and the discovered scrape targets for each one.
* `agent-logs-instances.json` and `agent-logs-targets.json` contain the active logs subsystem instances and the discovered log targets for each one.
* `agent-metrics.txt` contains a snapshot of the agent's internal metrics.
* The `pprof/` directory contains Go runtime profiling data (CPU, heap, goroutine, mutex, block profiles) as exported by the pprof package.

## Integrations API (Experimental)

> **WARNING**: This API is currently only available when the experimental
> [integrations revamp][integrations]
> is enabled. Both the revamp and this API are subject to change while they
> are still experimental.

### Integrations SD API

```
GET /agent/api/v1/metrics/integrations/sd
```

This endpoint returns all running metrics-based integrations. It conforms to the Prometheus [http_sd_config API](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#http_sd_config). Targets include integrations regardless of autoscrape being enabled; this allows for manually configuring scrape jobs to collect metrics from an integration running on an external agent.

The following labels will be present on all returned targets:

- `instance`: The unique instance ID of the running integration.
- `job`: `integrations/<__meta_agent_integration_name>`
- `agent_hostname`: `hostname:port` of the agent running the integration.
- `__meta_agent_integration_name`: The name of the integration.
- `__meta_agent_integration_instance`: The unique instance ID for the running integration.
- `__meta_agent_integration_autoscrape`: `1` if autoscrape is enabled for this integration, `0` otherwise.

To reduce the load on the agent's HTTP server, the following query parameters may also be provided to the URL:

- `integrations`: Comma-delimited list of integrations to return, e.g., `agent,node_exporter`.
- `instance`: Return all integrations matching a specific value for instance.

Status code: 200 if successful.
Response on success:

```
[
  {
    "targets": [ "<host>", ... ],
    "labels": {
      "<labelname>": "<labelvalue>", ...
    }
  },
  ...
]
```
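For example, a hypothetical probe of the SD endpoint, assuming the default HTTP listen address and that the `agent` and `node_exporter` integrations are running:

```bash
# Return SD targets for only the listed integrations to reduce server load.
curl "http://127.0.0.1:12345/agent/api/v1/metrics/integrations/sd?integrations=agent,node_exporter"
```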
", - ... - }, - "labels": { - "label_a": "value_a", - ... - }, - "last_scrape": , - "scrape_duration_ms": , - "scrape_error": - }, - ... - ] -} -``` - -## Ready / health API - -### Readiness check - -``` -GET /-/ready -``` - -Status code: 200 if ready. - -Response: -``` -Agent is Ready. -``` - -### Healthiness check - -``` -GET /-/healthy -``` - -Status code: 200 if healthy. - -Response: -``` -Agent is Healthy. -``` - -{{% docs/reference %}} -[scrape]: "/docs/agent/ -> /docs/agent//static/configuration/scraping-service" -[scrape]: "/docs/grafana-cloud/ -> ../configuration/scraping-service -[metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config" -[metrics]: "/docs/grafana-cloud/ -> ../configuration/metrics-config" -[integrations]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/integrations-next" -[integrations]: "/docs/grafana-cloud/ -> ../configuration/integrations/integrations-next" -{{% /docs/reference %}} diff --git a/docs/sources/static/configuration/_index.md b/docs/sources/static/configuration/_index.md deleted file mode 100644 index fa1a195bd6..0000000000 --- a/docs/sources/static/configuration/_index.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -aliases: -- ../configuration/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/ -- /docs/grafana-cloud/send-data/agent/static/configuration/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/ -description: Learn how to configure Grafana Agent in static mode -title: Configure static mode -weight: 300 ---- - -# Configure static mode - -The configuration of static mode is split across two places: - -* A YAML file -* [Command-line flags][flags] - -The YAML file is used to configure settings which are dynamic and can be -changed at runtime. The command-line flags then configure things which cannot -change at runtime, such as the listen port for the HTTP server. - -This file describes the YAML configuration, which is usually in a file named `config.yaml`. - -- [server_config][server] -- [metrics_config][metrics] -- [logs_config][logs] -- [traces_config][traces] -- [integrations_config][integrations] - -The configuration of Grafana Agent is "stable," but subject to breaking changes -as individual features change. Breaking changes to configuration will be -well-documented. - -## Updating configuration - -The configuration file can be reloaded at runtime using the `/-/reload` API -endpoint or sending a SIGHUP signal to the process. - -## Variable substitution - -You can use environment variables in the configuration file to set values that -need to be configurable during deployment. To enable this functionality, you -must pass `-config.expand-env` as a command-line flag to the Agent. - -To refer to an environment variable in the config file, use: - -``` -${VAR} -``` - -Where VAR is the name of the environment variable. - -Each variable reference is replaced at startup by the value of the environment -variable. The replacement is case-sensitive and occurs before the YAML file is -parsed. References to undefined variables are replaced by empty strings unless -you specify a default value or custom error text. - -To specify a default value, use: - -``` -${VAR:-default_value} -``` - -Where default_value is the value to use if the environment variable is -undefined. The full list of supported syntax can be found at Drone's -[envsubst repository](https://github.com/drone/envsubst). 
### Regex capture group references

When using `-config.expand-env`, `VAR` must be an alphanumeric string with at least one non-digit character. If `VAR` is a number, the expander will assume you're trying to use a regex capture group reference, and will coerce the result to be one.

This means references in your config file like `${1}` will remain untouched, but edge cases like `${1:-default}` will also be coerced to `${1}`, which may be slightly unexpected.

## Reloading (beta)

The configuration file can be reloaded at runtime. Read the [API documentation][api] for more information.

This functionality is in beta, and may have issues. Please open GitHub issues for any problems you encounter.

## File format

To specify which configuration file to load, pass the `-config.file` flag at the command line. The file is written in the [YAML format](https://en.wikipedia.org/wiki/YAML), defined by the scheme below. Brackets indicate that a parameter is optional. For non-list parameters the value is set to the specified default.

Generic placeholders are defined as follows:

- `<boolean>`: a boolean that can take the values `true` or `false`
- `<int>`: any integer matching the regular expression `[1-9]+[0-9]*`
- `<duration>`: a duration matching the regular expression `[0-9]+(ns|us|µs|ms|[smh])`
- `<labelname>`: a string matching the regular expression `[a-zA-Z_][a-zA-Z0-9_]*`
- `<labelvalue>`: a string of unicode characters
- `<filename>`: a valid path relative to current working directory or an absolute path.
- `<host>`: a valid string consisting of a hostname or IP followed by an optional port number
- `<string>`: a regular string
- `<secret>`: a regular string that is a secret, such as a password

Supported contents and default values of `config.yaml`:

```yaml
# Configures the server of the Agent used to enable self-scraping.
[server: <server_config>]

# Configures metric collection.
# In previous versions of the agent, this field was called "prometheus".
[metrics: <metrics_config>]

# Configures log collection.
# In previous versions of the agent, this field was called "loki".
[logs: <logs_config>]

# Configures trace collection.
# In previous versions of the agent, this field was called "tempo".
[traces: <traces_config>]

# Configures integrations for the Agent.
[integrations: <integrations_config>]
```

## Remote Configuration (Experimental)

An experimental feature for fetching remote configuration files over HTTP/S can be enabled by passing the `-enable-features=remote-configs` flag at the command line. With this feature enabled, you may pass an HTTP/S URL to the `-config.file` flag.

The following flags will configure basic auth for requests made to HTTP/S remote config URLs:
- `-config.url.basic-auth-user`: the basic auth username
- `-config.url.basic-auth-password-file`: path to a file containing the basic auth password

{{< admonition type="note" >}}
This experimental feature is subject to change in future releases.
{{< /admonition >}}

{{% docs/reference %}}
[flags]: "/docs/agent/ -> /docs/agent//static/configuration/flags"
[flags]: "/docs/grafana-cloud/ -> ./flags"
[server]: "/docs/agent/ -> /docs/agent//static/configuration/server-config"
[server]: "/docs/grafana-cloud/ -> ./server-config"
[metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config"
[metrics]: "/docs/grafana-cloud/ -> ./metrics-config"
[logs]: "/docs/agent/ -> /docs/agent//static/configuration/logs-config"
[logs]: "/docs/grafana-cloud/ -> ./logs-config"
[traces]: "/docs/agent/ -> /docs/agent//static/configuration/traces-config"
[traces]: "/docs/grafana-cloud/ -> ./traces-config"
[integrations]: "/docs/agent/ -> /docs/agent//static/configuration/integrations"
[integrations]: "/docs/grafana-cloud/ -> ./integrations"
[api]: "/docs/agent/ -> /docs/agent//static/api#reload-configuration-file-beta"
[api]: "/docs/grafana-cloud/ -> ../api#reload-configuration-file-beta"
{{% /docs/reference %}}
diff --git a/docs/sources/static/configuration/agent-management.md b/docs/sources/static/configuration/agent-management.md
deleted file mode 100644
index af327bb17b..0000000000
--- a/docs/sources/static/configuration/agent-management.md
+++ /dev/null
@@ -1,171 +0,0 @@
---
aliases:
- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/agent-management/
- /docs/grafana-cloud/send-data/agent/static/configuration/agent-management/
canonical: https://grafana.com/docs/agent/latest/static/configuration/agent-management/
description: Learn about Agent Management
menuTitle: Agent Management
title: Agent Management - Experimental
weight: 700
---

# Agent Management - Experimental

**Agent Management is under active development. Backwards incompatible changes to its API are to be expected. Feedback is much appreciated. This is a feature that MAY NOT make it to production.**

Agent Management enables centralized management of fleets of Grafana Agents.

In this mode, Grafana Agent polls and dynamically reloads its configuration from a remote API server.

Remote Configurations are composed of a base configuration and a set of snippets. Snippets are applied conditionally via label matching.

## Configuration

Agent Management can be used by passing the flag `-enable-features=agent-management`. When enabled, the file referred to by `-config.file` will be loaded as an agent management configuration file.

Agent Management configuration files are YAML documents which conform to the following schema:

```yaml
# Agent Management configuration.
agent_management:
  # Host of the API server to connect to.
  host: <host>

  # Protocol to use when connecting to the API server (http|https).
  protocol: <string>

  # The polling interval for fetching the configuration.
  polling_interval: <duration>

  # Sets the `Authorization` header on every request with the
  # configured username and password.
  basic_auth:
    [ username: <string> ]
    [ password_file: <string> ]

  # Optional proxy URL.
  [ proxy_url: <string> ]

  # Comma-separated string that can contain IPs, CIDR notation, domain names
  # that should be excluded from proxying. IP and domain names can
  # contain port numbers.
  [ no_proxy: <string> ]

  # Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
  [ proxy_from_environment: <boolean> | default: false ]

  # Specifies headers to send to proxies during CONNECT requests.
  [ proxy_connect_header:
    [ <string>: [<secret>, ...] ] ]

  # Fields specific to remote configuration.
  remote_configuration:
    # A path to a directory where the remote configuration will be cached. The directory must be writeable.
    cache_location: <string>

    # The namespace to use.
    namespace: <string>

    # Set of self-identifying labels used for snippet selection.
    labels:
      [ <labelname>: <labelvalue> ... ]

    # Whether to use labels from the label management service. If enabled, labels from the API supersede the ones configured in the agent. The agent_id field must be defined.
    label_management_enabled: <boolean> | default = false

    # A unique ID for the agent, which is used to identify the agent.
    agent_id: <string>

    # Whether to accept HTTP 304 Not Modified responses from the API server. If enabled, the agent will use the cached configuration if the API server responds with HTTP 304 Not Modified. You can set this argument to `false` for debugging or testing.
    accept_http_not_modified: <boolean> | default = true
```
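As a minimal sketch of starting the Agent in this mode, assuming the binary is installed as `grafana-agent` and the schema above is saved as `agent-management.yaml` (a hypothetical filename):

```bash
# With agent-management enabled, -config.file points at the agent management
# configuration file rather than a normal static mode configuration.
grafana-agent -enable-features=agent-management -config.file=agent-management.yaml
```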
## API

Grafana Agents with Agent Management enabled continuously poll the API server for an up-to-date configuration. The API server is expected to implement a `GET /agent-management/api/agent/v2/namespace/:namespace/remote_config` HTTP endpoint returning a successful response with the following body format:

```yaml
# The base configuration for the Agent.
base_config: |
  <grafana_agent_config>

# A set of snippets to be conditionally merged into the base configuration.
snippets:
  [ <snippet_name>: <snippet_content> ... ]
```

### grafana_agent_config

This is a standard Grafana Agent [static mode configuration](/docs/agent/latest/static/configuration/). Typically used to configure the server, remote_writes, and other global configuration.

### snippet_content

The snippet content is a YAML document which conforms to the following schema:

```yaml
# Config provides the actual snippet configuration.
config: |
  [metrics_scrape_configs]:
    - [<scrape_config> ... ]
  [logs_scrape_configs]:
    - [<promtail.scrape_config> ... ]
  [integration_configs]:
    [<integrations_config> ... ]
# Selector is a set of labels used to decide which snippets to apply to the final configuration.
selector:
  [ <labelname>: <labelvalue> ... ]
```

> **Note:** More information on the following types can be found in their respective documentation pages:
>
> * [`scrape_config`](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config)
> * [`promtail.scrape_config`](/docs/loki/latest/clients/promtail/configuration/#scrape_configs)
> * [`integrations_config`](/docs/agent/latest/static/configuration/integrations)

> **Note:** Snippet selection is currently done in the API server. This behavior is subject to change in the future.

### Example response body

```yaml
base_config: |
  server:
    log_level: info
  metrics:
    global:
      remote_write:
      - basic_auth:
          password_file: key.txt
          username: 123
        url: https://myserver.com/api/prom/push
  logs:
    positions_directory: /var/lib/grafana-agent
    global:
      clients:
      - basic_auth:
          password_file: key.txt
          username: 456
        url: https://myserver.com/loki/api/v1/push
snippets:
  snip1:
    config: |
      metrics_scrape_configs:
      - job_name: 'prometheus'
        scrape_interval: 60s
        static_configs:
        - targets: ['localhost:9090']
      logs_scrape_configs:
      - job_name: 'loki'
        static_configs:
        - targets: ['localhost:3100']
      integration_configs:
        node_exporter:
          enabled: true
    selector:
      os: linux
      app: app1
```

> **Note:** Base configurations and snippets can contain go's [text/template](https://pkg.go.dev/text/template) actions. If you need to preserve the literal value of a template action, you can escape it using backticks.
For example:

```
{{ `{{ .template_var }}` }}
```
diff --git a/docs/sources/static/configuration/create-config-file.md b/docs/sources/static/configuration/create-config-file.md
deleted file mode 100644
index e4d77a3386..0000000000
--- a/docs/sources/static/configuration/create-config-file.md
+++ /dev/null
@@ -1,192 +0,0 @@
---
aliases:
- ../../configuration/create-config-file/
- ../../set-up/create-config-file/
- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/create-config-file/
- /docs/grafana-cloud/send-data/agent/static/configuration/create-config-file/
canonical: https://grafana.com/docs/agent/latest/static/configuration/create-config-file/
description: Learn how to create a configuration file
title: Create a configuration file
weight: 50
---

# Create a configuration file

The Grafana Agent supports configuring multiple independent "subsystems." Each subsystem helps you collect data for a specific type of telemetry.

- The **Metrics** subsystem allows you to collect metrics to send to Prometheus.
- The **Logs** subsystem allows you to collect logs to send to Grafana Loki.
- The **Traces** subsystem allows you to collect spans to send to Grafana Tempo.
- The **Integrations** subsystem allows you to collect metrics for common applications, such as MySQL.

Integrations are recommended for first-time users of observability platforms, especially newcomers to Prometheus. Users with more experience with Prometheus or users that already have an existing Prometheus config file can configure the Prometheus subsystem manually.

## Integrations

_Integrations_ are individual features that collect metrics for you. For example, the `agent` integration collects metrics from that running instance of the Grafana Agent. The `node_exporter` integration will collect metrics from the Linux machine that the Grafana Agent is running on.

```yaml
metrics:
  wal_directory: /tmp/wal
  global:
    remote_write:
    - url: http://localhost:9009/api/prom/push

integrations:
  agent:
    enabled: true
```

In this example, we first must configure the `wal_directory` which is used to store metrics in a Write-Ahead Log (WAL). The WAL is required and ensures that samples will be redelivered in case of failure (e.g., network issues, machine reboot). We also configure `remote_write`, which is where all metrics should be sent by default.

Then, the individual `integrations` are configured. In this example, just the `agent` integration is enabled. Finally, `remote_write` is configured with a location to send metrics. You will have to replace this URL with the appropriate URL for your `remote_write` system (such as a Grafana Cloud Hosted Prometheus instance).

When the Agent is run with this file, it will collect metrics from itself and send those metrics to the default `remote_write` endpoint. All metrics from integrations will have an `instance` label matching the hostname of the machine the Grafana Agent is running on. This label helps to uniquely identify the source of metrics if you are running multiple Grafana Agents across multiple machines.

Full configuration options can be found in the [configuration reference][configure].

## Prometheus config/migrating from Prometheus

The Prometheus subsystem config is useful for those migrating from Prometheus and those who want to scrape metrics from something that currently does not have an associated integration.
- -To migrate from an existing Prometheus config, use this Agent config as a -template and copy and paste subsections from your existing Prometheus config -into it: - -```yaml -metrics: - global: - # PASTE PROMETHEUS global SECTION HERE - configs: - - name: agent - scrape_configs: - # PASTE scrape_configs SECTION HERE - remote_write: - # PASTE remote_write SECTION HERE -``` - -For example, this configuration file configures the Grafana Agent to -scrape itself without using the integration: - -```yaml -server: - log_level: info - -metrics: - global: - scrape_interval: 1m - configs: - - name: agent - scrape_configs: - - job_name: agent - static_configs: - - targets: ['127.0.0.1:12345'] - remote_write: - - url: http://localhost:9009/api/prom/push -``` - -Like with integrations, full configuration options can be found in the -[configuration][configure]. - -## Loki Config/Migrating from Promtail - -The Loki Config allows for collecting logs to send to a Loki API. Users that are -familiar with Promtail will notice that the Loki config for the Agent matches -their existing Promtail config with the following exceptions: - -- The deprecated field `client` is not present -- The `server` field is not present - -To migrate from an existing Promtail config, make sure you are using `clients` -instead of `client` and remove the `server` block if present. Then paste your -Promtail config into the Agent config file inside of a `logs` section: - -```yaml -logs: - configs: - - name: default - # PASTE YOUR PROMTAIL CONFIG INSIDE OF HERE -``` - -### Full config example - -Here is an example full config file, using integrations, Prometheus, Loki, and -Tempo: - -```yaml -server: - log_level: info - -metrics: - global: - scrape_interval: 1m - remote_write: - - url: http://localhost:9009/api/prom/push - configs: - - name: default - scrape_configs: - - job_name: agent - static_configs: - - targets: ['127.0.0.1:12345'] - -logs: - configs: - - name: default - positions: - filename: /tmp/positions.yaml - scrape_configs: - - job_name: varlogs - static_configs: - - targets: [localhost] - labels: - job: varlogs - __path__: /var/log/*log - clients: - - url: http://localhost:3100/loki/api/v1/push - -traces: - configs: - - name: default - receivers: - jaeger: - protocols: - grpc: # listens on the default jaeger grpc port: 14250 - remote_write: - - endpoint: localhost:55680 - insecure: true # only add this if TLS is not required - batch: - timeout: 5s - send_batch_size: 100 - -integrations: - node_exporter: - enabled: true -``` - -{{% docs/reference %}} -[configure]: "/docs/agent/ -> /docs/agent//static/configuration" -[configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration" -{{% /docs/reference %}} diff --git a/docs/sources/static/configuration/flags.md b/docs/sources/static/configuration/flags.md deleted file mode 100644 index 42dc3fb12c..0000000000 --- a/docs/sources/static/configuration/flags.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -aliases: -- ../../configuration/flags/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/flags/ -- /docs/grafana-cloud/send-data/agent/static/configuration/flags/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/flags/ -description: Learn about command-line flags -title: Command-line flags -weight: 100 ---- - -# Command-line flags - -Command-line flags are used to configure settings of Grafana Agent which cannot -be updated at runtime. 
- -All flags may be prefixed with either one hyphen or two (i.e., both -`-config.file` and `--config.file` are valid). - -> Note: There may be flags returned by `-help` which are not listed here; this -> document only lists flags that do not have an equivalent in the YAML file. - -## Basic - -* `-version`: Print out version information -* `-help`: Print out help - -## Experimental feature flags - -Grafana Agent has some experimental features that require being enabled through -an `-enable-features` flag. This flag takes a comma-delimited list of feature -names to enable. - -Valid feature names are: - -* `remote-configs`: Enable [retrieving][retrieving] config files over HTTP/HTTPS -* `integrations-next`: Enable [revamp][revamp] of the integrations subsystem -* `extra-scrape-metrics`: When enabled, additional time series are exposed for each metrics instance scrape. See [Extra scrape metrics](https://prometheus.io/docs/prometheus/2.45/feature_flags/#extra-scrape-metrics). -* `agent-management`: Enable support for [agent management][management]. - -## Report information usage - -By default, Grafana Agent sends anonymous, but uniquely-identifiable usage information -from your running Grafana Agent instance to Grafana Labs. -These statistics are sent to `stats.grafana.org`. - -Statistics help us better understand how Grafana Agent is used. -This helps us prioritize features and documentation. - -The usage information includes the following details: -* A randomly generated and an anonymous unique ID (UUID). -* Timestamp of when the UID was first generated. -* Timestamp of when the report was created (by default, every 4h). -* Version of running Grafana Agent. -* Operating system Grafana Agent is running on. -* System architecture Grafana Agent is running on. -* List of enabled feature flags. -* List of enabled integrations. - -This list may change over time. All newly reported data will also be documented in the CHANGELOG. - -If you would like to disable the reporting, Grafana Agent provides the flag `-disable-reporting` -to stop the reporting. - -## Support bundles -Grafana Agent allows the exporting of 'support bundles' on the `/-/support` -endpoint. Support bundles are zip files containing commonly-used information -that provide a baseline for debugging issues with the Agent. - -Support bundles contain all information in plain text, so that they can be -inspected before sharing to verify that no sensitive information has leaked. - -Support bundles contain the following data: -* `agent-config.yaml` contains the current agent configuration (when the `-config.enable-read-api` flag is passed). -* `agent-logs.txt` contains the agent logs during the bundle generation. -* `agent-metadata.yaml` contains the agent's build version, operating system, architecture, uptime, plus a string payload defining which extra agent features have been enabled via command-line flags. -* `agent-metrics-instances.json` and `agent-metrics-targets.json` contain the active metric subsystem instances, and the discovered scraped targets for each one. -* `agent-logs-instances.json` and `agent-logs-targets.json` contains the active logs subsystem instances and the discovered log targets for each one. -* `agent-metrics.txt` contains a snapshot of the agent's internal metrics. -* The `pprof/` directory contains Go runtime profiling data (CPU, heap, goroutine, mutex, block profiles) as exported by the pprof package. 
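As a sketch of fetching a bundle from a locally running Agent, assuming the default HTTP listen address of `localhost:12345`:

```bash
# Save a support bundle to disk; the duration parameter is optional.
curl -o agent-support-bundle.zip http://localhost:12345/-/support
```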
- -To disable the endpoint that exports these support bundles, you can pass in the -`-disable-support-bundle` command-line flag. - -## Configuration file - -* `-config.file`: Path to the configuration file to load. May be an HTTP(s) URL when the `remote-configs` feature is enabled. -* `-config.file.type`: Type of file which `-config.file` refers to (default `yaml`). Valid values are `yaml` and `dynamic`. -* `-config.expand-env`: Expand environment variables in the loaded configuration file -* `-config.enable-read-api`: Enables the `/-/config` and `/agent/api/v1/configs/{name}` API endpoints to print YAML configuration - -### Remote Configuration - -These flags require the `remote-configs` feature to be enabled: - -`-config.url.basic-auth-user`: Basic Authentication username to use when fetching the remote configuration file -`-config.url.basic-auth-password-file`: File containing a Basic Authentication password to use when fetching the remote configuration file - -## Server - -* `-server.register-instrumentation`: Expose the `/metrics` and `/debug/pprof/` instrumentation handlers over HTTP (default true) -* `-server.graceful-shutdown-timeout`: Timeout for a graceful server shutdown -* `-server.log.source-ips.enabled`: Whether to log IP addresses of incoming requests -* `-server.log.source-ips.header`: Header field to extract incoming IP requests from (defaults to Forwarded, X-Real-IP, X-Forwarded-For) -* `-server.log.source-ips.regex`: Regex to extract the IP out of the read header, using the first capture group as the IP address -* `-server.http.network`: HTTP server listen network (default `tcp`) -* `-server.http.address`: HTTP server listen:port (default `127.0.0.1:12345`) -* `-server.http.enable-tls`: Enable TLS for the HTTP server -* `-server.http.conn-limit`: Maximum number of simultaneous HTTP connections -* `-server.http.idle-timeout`: HTTP server idle timeout -* `-server.http.read-timeout`: HTTP server read timeout -* `-server.http.write-timeout`: HTTP server write timeout -* `-server.http.in-memory-addr`: Internal address used for the agent to make - in-memory HTTP connections to itself. (default `agent.internal:12345`) The - port number specified here is virtual and does not open a real network port. 
* `-server.grpc.network`: gRPC server listen network (default `tcp`)
* `-server.grpc.address`: gRPC server listen host:port (default `127.0.0.1:12346`)
* `-server.grpc.enable-tls`: Enable TLS for the gRPC server
* `-server.grpc.conn-limit`: Maximum number of simultaneous gRPC connections
* `-server.grpc.keepalive.max-connection-age`: Maximum age for any gRPC connection for a graceful shutdown
* `-server.grpc.keepalive.max-connection-age-grace`: Grace period to forcibly close connections after a graceful shutdown starts
* `-server.grpc.keepalive.max-connection-idle`: Time to wait before closing idle gRPC connections
* `-server.grpc.keepalive.min-time-between-pings`: Maximum frequency that clients may send pings at
* `-server.grpc.keepalive.ping-without-stream-allowed`: Allow clients to send pings without having a gRPC stream
* `-server.grpc.keepalive.time`: Frequency to send keepalive pings from the server
* `-server.grpc.keepalive.timeout`: How long to wait for a keepalive pong before closing the connection
* `-server.grpc.max-concurrent-streams`: Maximum number of concurrent gRPC streams (0 = unlimited)
* `-server.grpc.max-recv-msg-size-bytes`: Maximum size in bytes for received gRPC messages
* `-server.grpc.max-send-msg-size-bytes`: Maximum size in bytes for sent gRPC messages
* `-server.grpc.in-memory-addr`: Internal address used for the agent to make in-memory gRPC connections to itself. (default `agent.internal:12346`). The port number specified here is virtual and does not open a real network port.

### TLS Support

TLS support can be enabled with `-server.http.enable-tls` and `-server.grpc.enable-tls` for the HTTP and gRPC servers respectively.

`server.http_tls_config` and `integrations.http_tls_config` must be set in the YAML configuration when the `-server.http.enable-tls` flag is used.

`server.grpc_tls_config` must be set in the YAML configuration when the `-server.grpc.enable-tls` flag is used.
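A minimal sketch of enabling TLS for the HTTP server, assuming the binary is installed as `grafana-agent` and that `config.yaml` already defines the required `server.http_tls_config` and `integrations.http_tls_config` blocks:

```bash
# The flag only switches TLS on; certificates and keys come from the YAML config.
grafana-agent -config.file=config.yaml -server.http.enable-tls
```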
## Metrics

* `-metrics.wal-directory`: Directory to store the metrics Write-Ahead Log in

{{% docs/reference %}}
[retrieving]: "/docs/agent/ -> /docs/agent//static/configuration#remote-configuration-experimental"
[retrieving]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration#remote-configuration-experimental"

[revamp]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/integrations-next/"
[revamp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next"

[management]: "/docs/agent/ -> /docs/agent//static/configuration/agent-management"
[management]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/agent-management"
{{% /docs/reference %}}
diff --git a/docs/sources/static/configuration/integrations/_index.md b/docs/sources/static/configuration/integrations/_index.md
deleted file mode 100644
index f0053c2749..0000000000
--- a/docs/sources/static/configuration/integrations/_index.md
+++ /dev/null
@@ -1,153 +0,0 @@
---
aliases:
- ../../configuration/integrations/
- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/
- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/
canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/
description: Learn about integrations_config
title: integrations_config
weight: 500
---

# integrations_config

The `integrations_config` block configures how the Agent runs integrations that scrape and send metrics without needing to run specific Prometheus exporters or manually write `scrape_configs`:

```yaml
# Controls the Agent integration
agent:
  # Enables the Agent integration, allowing the Agent to automatically
  # collect and send metrics about itself.
  [enabled: <boolean> | default = false]

  # Sets an explicit value for the instance label when the integration is
  # self-scraped. Overrides inferred values.
  #
  # The default value for this integration is inferred from the agent hostname
  # and HTTP listen port, delimited by a colon.
  [instance: <string>]

  # Automatically collect metrics from this integration. If disabled,
  # the agent integration will be run but not scraped and thus not
  # remote_written. Metrics for the integration will be exposed at
  # /integrations/agent/metrics and can be scraped by an external process.
  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]

  # How often should the metrics be collected? Defaults to
  # prometheus.global.scrape_interval.
  [scrape_interval: <duration> | default = <global_config.scrape_interval>]

  # The timeout before considering the scrape a failure. Defaults to
  # prometheus.global.scrape_timeout.
  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]

  # How frequent to truncate the WAL for this integration.
  [wal_truncate_frequency: <duration> | default = "60m"]

  # Allows for relabeling labels on the target.
  relabel_configs:
    [- <relabel_config> ... ]

  # Relabel metrics coming from the integration, allowing to drop series
  # from the integration that you don't care about.
  metric_relabel_configs:
    [ - <relabel_config> ... ]

# Client TLS Configuration
# Client Cert/Key Values need to be defined if the server is requesting a certificate
# (Client Auth Type = RequireAndVerifyClientCert || RequireAnyClientCert).
http_tls_config: <tls_config>

# Controls the apache_http integration
apache_http: <apache_http_config>

# Controls the node_exporter integration
node_exporter: <node_exporter_config>

# Controls the process_exporter integration
process_exporter: <process_exporter_config>

# Controls the mysqld_exporter integration
mysqld_exporter: <mysqld_exporter_config>

# Controls the oracledb integration
oracledb: <oracledb_config>

# Controls the redis_exporter integration
redis_exporter: <redis_exporter_config>

# Controls the dnsmasq_exporter integration
dnsmasq_exporter: <dnsmasq_exporter_config>

# Controls the elasticsearch_exporter integration
elasticsearch_exporter: <elasticsearch_exporter_config>

# Controls the memcached_exporter integration
memcached_exporter: <memcached_exporter_config>

# Controls the mssql integration
mssql: <mssql_config>

# Controls the postgres_exporter integration
postgres_exporter: <postgres_exporter_config>

# Controls the snmp_exporter integration
snmp_exporter: <snmp_config>

# Controls the snowflake integration
snowflake: <snowflake_config>

# Controls the statsd_exporter integration
statsd_exporter: <statsd_exporter_config>

# Controls the consul_exporter integration
consul_exporter: <consul_exporter_config>

# Controls the windows_exporter integration
windows_exporter: <windows_exporter_config>

# Controls the kafka_exporter integration
kafka_exporter: <kafka_exporter_config>

# Controls the mongodb_exporter integration
mongodb_exporter: <mongodb_exporter_config>

# Controls the github_exporter integration
github_exporter: <github_exporter_config>

# Controls the blackbox_exporter integration
blackbox: <blackbox_config>

# Controls the CloudWatch exporter integration
cloudwatch_exporter: <cloudwatch_exporter_config>

# Controls the azure_exporter integration
azure_exporter: <azure_exporter_config>

# Controls the gcp_exporter integration
gcp_exporter: <gcp_exporter_config>

# Controls the squid integration
squid: <squid_config>

# Automatically collect metrics from enabled integrations. If disabled,
# integrations will be run but not scraped and thus not remote_written. Metrics
# for integrations will be exposed at /integrations/<integration_name>/metrics
# and can be scraped by an external process.
[scrape_integrations: <boolean> | default = true]

# Extra labels to add to all samples coming from integrations.
labels:
  { <labelname>: <labelvalue> }

# The period to wait before restarting an integration that exits with an
# error.
[integration_restart_backoff: <duration> | default = "5s"]

# A list of remote_write targets. Defaults to global_config.remote_write.
# If provided, overrides the global defaults.
prometheus_remote_write:
  - [<remote_write>]
```
diff --git a/docs/sources/static/configuration/integrations/apache-exporter-config.md b/docs/sources/static/configuration/integrations/apache-exporter-config.md
deleted file mode 100644
index 3edce2f275..0000000000
--- a/docs/sources/static/configuration/integrations/apache-exporter-config.md
+++ /dev/null
@@ -1,73 +0,0 @@
---
aliases:
- ../../../configuration/integrations/apache-exporter-config/
- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/apache-exporter-config/
- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/apache-exporter-config/
canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/apache-exporter-config/
description: Learn about apache_http_config
title: apache_http_config
---

# apache_http_config

The `apache_http_config` block configures the `apache_http` integration, which is an embedded version of [`apache_exporter`](https://github.com/Lusitaniae/apache_exporter). This allows the collection of Apache [mod_status](https://httpd.apache.org/docs/current/mod/mod_status.html) statistics via HTTP.

Full reference of options:

```yaml
  # Enables the apache_http integration, allowing the Agent to automatically
  # collect metrics for the specified apache http servers.
- [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the hostname portion - # of api_url. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the apache_http integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/apache_http/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # URI to apache stub status page. - # If your server-status page is secured by http auth, add the credentials to the scrape URL following this example: - # http://user:password@localhost/server-status?auto . - [scrape_uri: | default = "http://localhost/server-status?auto"] - - # Override for HTTP Host header; empty string for no override. - [host_override: | default = ""] - - # Ignore server certificate if using https. - [insecure: | default = false] - -``` diff --git a/docs/sources/static/configuration/integrations/azure-exporter-config.md b/docs/sources/static/configuration/integrations/azure-exporter-config.md deleted file mode 100644 index d2aa146dfc..0000000000 --- a/docs/sources/static/configuration/integrations/azure-exporter-config.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/azure-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/azure-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/azure-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/azure-exporter-config/ -description: Learn about azure_exporter_config -title: azure_exporter_config ---- - -# azure_exporter_config - -## Overview -The `azure_exporter_config` block configures the `azure_exporter` integration, an embedded version of -[`azure-metrics-exporter`](https://github.com/webdevops/azure-metrics-exporter), used to -collect metrics from [Azure Monitor](https://azure.microsoft.com/en-us/products/monitor). - -The exporter offers the following two options for gathering metrics. - -1. (Default) Use an [Azure Resource Graph](https://azure.microsoft.com/en-us/get-started/azure-portal/resource-graph/#overview) query to identify resources for gathering metrics. - 1. This query will make one API call per resource identified. - 1. Subscriptions with a reasonable amount of resources can hit the [12000 requests per hour rate limit](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling#subscription-and-tenant-limits) Azure enforces. -1. Set the regions to gather metrics from and get metrics for all resources across those regions. - 1. 
This option will make one API call per subscription, dramatically reducing the number of API calls. - 1. This approach does not work with all resource types, and Azure does not document which resource types do or do not work. - 1. A resource type that is not supported produces errors that look like `Resource type: microsoft.containerservice/managedclusters not enabled for Cross Resource metrics`. - 1. If you encounter one of these errors you must use the default Azure Resource Graph based option to gather metrics. - -## List of Supported Services and Metrics -The exporter supports all metrics defined by Azure Monitor. The complete list of available metrics can be found in the [Azure Monitor documentation](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported). -Metrics for this integration are exposed with the template `azure_{type}_{metric}_{aggregation}_{unit}`. As an example, -the Egress metric for BlobService would be exported as `azure_microsoft_storage_storageaccounts_blobservices_egress_total_bytes`. - -## Authentication - -The agent must be running in an environment with access to Azure. The exporter uses the Azure SDK for go and supports authentication via https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication?tabs=bash#2-authenticate-with-azure. - -The account used by Grafana Agent needs: -* [Read access to the resources that will be queried by Resource Graph](https://learn.microsoft.com/en-us/azure/governance/resource-graph/overview#permissions-in-azure-resource-graph) -* Permissions to call the [Microsoft.Insights Metrics API](https://learn.microsoft.com/en-us/rest/api/monitor/metrics/list) which should be the `Microsoft.Insights/Metrics/Read` permission - -## Configuration - -### Config Reference - -```yaml - # - # Common Integration Settings - # - - # Enables the azure_exporter integration, allowing the Agent to automatically collect metrics or expose azure metrics - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is self-scraped. Default will be - # based on subscriptions and ResourceType being monitored - [instance: ] - - # Automatically collect metrics from this integration. If disabled, the exporter integration will be run but not - # scraped and thus not. remote-written. Metrics for the integration will be exposed at - # /integrations/azure_exporter/metrics and can be scraped by an external process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter specific configuration - # - - # Required: The azure subscription(s) to scrape metrics from - subscriptions: - [ - ... 
] - - # Required: The Azure Resource Type to scrape metrics for - # Valid values can be found as the heading names on this page https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported - # Ex: Microsoft.Cache/redis - [resource_type: ] - - # Required: The metrics to scrape from resources - # Valid values can be found in the `Metric` column for the`resource_type` https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported - # Example: - # resource_type: Microsoft.Cache/redis - # metrics: - # - allcachehits - metrics: - [ - ... ] - - # Optional: The [kusto query](https://learn.microsoft.com/en-us/azure/data-explorer/kusto/query/) filter to apply when searching for resources - # This value will be embedded in to a template query of the form `Resources | where type =~ "" | project id, tags` - # Can't be used if `regions` is set. - [resource_graph_query_filter: ] - - # Optional: The list of regions for gathering metrics. Enables gathering metrics for all resources in the subscription. - # The list of available `regions` to your subscription can be found by running the Azure CLI command `az account list-locations --query '[].name'`. - # Can't be used if `resource_graph_query_filter` is set. - regions: - [ - ... ] - - # Optional: Aggregation to apply for the metrics produced. Valid values are minimum, maximum, average, total, and count - # If no aggregation is specified the value for `Aggregation Type` on the `Metric` is used from https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported - metric_aggregations: - [ - ... ] - - # Optional: An [ISO8601 Duration](https://en.wikipedia.org/wiki/ISO_8601#Durations) used when querying the metric value - [timespan: | default = "PT1M"] - - # Optional: Used to include `Dimensions` available to a `Metric` definition https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported - # These will appear as labels on the metrics, - # If a single dimension is requested it will have the name `dimension` - # If multiple dimensions are requested they will have the name `dimension` - # Example: - # resource_type: Microsoft.Cache/redis - # metrics: - # - allcachehits - # included_dimensions: - # - ShardId - # - Port - # - Primary - included_dimensions: - [ - ... ] - - # Optional: A list of resource tags to include on the final metrics - # These are added as labels with the name `tag_` - included_resource_tags: - [ - ... 
] - - # Optional: used for ResourceTypes which have multiple levels of metrics - # Example: the resource_type Microsoft.Storage/storageAccounts has metrics for - # Microsoft.Storage/storageAccounts (generic metrics which apply to all storage accounts) - # Microsoft.Storage/storageAccounts/blobServices (generic metrics + metrics which only apply to blob stores) - # Microsoft.Storage/storageAccounts/fileServices (generic metrics + metrics which only apply to file stores) - # Microsoft.Storage/storageAccounts/queueServices (generic metrics + metrics which only apply to queue stores) - # Microsoft.Storage/storageAccounts/tableServices (generic metrics + metrics which only apply to table stores) - # If you want blob store metrics you will need to set - # resource_type: Microsoft.Storage/storageAccounts - # metric_namespace = Microsoft.Storage/storageAccounts/blobServices - [metric_namespace: ] - - # Optional: Which azure cloud environment to connect to, azurecloud, azurechinacloud, azuregovernmentcloud, or azurepprivatecloud - [azure_cloud_environment: | default = "azurecloud"] - - # Optional: Validation is disabled by default to reduce the number of Azure exporter instances required when a `resource_type` has metrics with varying dimensions. - # Choosing to enable `validate_dimensions` will require one exporter instance per metric + dimension combination which can be very tedious to maintain. - [validate_dimensions: | default = false] -``` - -### Examples - -#### Azure Kubernetes Service Node Metrics -```yaml - azure_exporter: - enabled: true - scrape_interval: 60s - subscriptions: - - - resource_type: microsoft.containerservice/managedclusters - metrics: - - node_cpu_usage_millicores - - node_cpu_usage_percentage - - node_disk_usage_bytes - - node_disk_usage_percentage - - node_memory_rss_bytes - - node_memory_rss_percentage - - node_memory_working_set_bytes - - node_memory_working_set_percentage - - node_network_in_bytes - - node_network_out_bytes - included_resource_tags: - - environment - included_dimensions: - - node - - nodepool - - device -``` - -#### Blob Storage Metrics -```yaml - azure_exporter: - enabled: true - scrape_interval: 60s - subscriptions: - - - resource_type: Microsoft.Storage/storageAccounts - metric_namespace: Microsoft.Storage/storageAccounts/blobServices - regions: - - westeurope - metrics: - - Availability - - BlobCapacity - - BlobCount - - ContainerCount - - Egress - - IndexCapacity - - Ingress - - SuccessE2ELatency - - SuccessServerLatency - - Transactions - included_dimensions: - - ApiName - - TransactionType - timespan: PT1H -``` - -### Multiple Azure Services in a single config - -The Azure Metrics API has rather strict limitations on the number of parameters which can be supplied. Due to this, you cannot -gather metrics from multiple `resource_types` in the same `azure_exporter` instance. If you need metrics from multiple resources, -you can enable `integration-next` or configure Agent to expose the exporter via the `azure_exporter` config with data configured through metrics scrape_configs. The following example configuration combines the two examples above in a single Agent configuration. - -> **Note**: This is not a complete configuration; blocks have been removed for simplicity. 
- -```yaml -integrations: - azure_exporter: - enabled: true - scrape_integration: false - azure_cloud_environment: azurecloud - -metrics: - configs: - - name: integrations - scrape_configs: - - job_name: azure-blob-storage - scrape_interval: 1m - scrape_timeout: 50s - static_configs: - - targets: ["localhost:12345"] - metrics_path: /integrations/azure_exporter/metrics - params: - subscriptions: - - 179c4f30-ebd8-489e-92bc-fb64588dadb3 - resource_type: ["Microsoft.Storage/storageAccounts"] - regions: - - westeurope - metric_namespace: ["Microsoft.Storage/storageAccounts/blobServices"] - metrics: - - Availability - - BlobCapacity - - BlobCount - - ContainerCount - - Egress - - IndexCapacity - - Ingress - - SuccessE2ELatency - - SuccessServerLatency - - Transactions - included_dimensions: - - ApiName - - TransactionType - timespan: ["PT1H"] - - job_name: azure-kubernetes-node - scrape_interval: 1m - scrape_timeout: 50s - static_configs: - - targets: ["localhost:12345"] - metrics_path: /integrations/azure_exporter/metrics - params: - subscriptions: - - 179c4f30-ebd8-489e-92bc-fb64588dadb3 - resource_type: ["microsoft.containerservice/managedclusters"] - resource_graph_query_filter: [" where location == 'westeurope'"] - metrics: - - node_cpu_usage_millicores - - node_cpu_usage_percentage - - node_disk_usage_bytes - - node_disk_usage_percentage - - node_memory_rss_bytes - - node_memory_rss_percentage - - node_memory_working_set_bytes - - node_memory_working_set_percentage - - node_network_in_bytes - - node_network_out_bytes - included_resource_tags: - - environment - included_dimensions: - - node - - nodepool - - device -``` - -In this example, all `azure_exporter`-specific configuration settings have been moved to the `scrape_config`. This method supports all available configuration options except `azure_cloud_environment`, which must be configured on the `azure_exporter`. For this method, if a field supports a singular value like `resource_graph_query_filter`, you -must be put it into an array, for example, `resource_graph_query_filter: ["where location == 'westeurope'"]`. diff --git a/docs/sources/static/configuration/integrations/blackbox-config.md b/docs/sources/static/configuration/integrations/blackbox-config.md deleted file mode 100644 index 77a592ddb0..0000000000 --- a/docs/sources/static/configuration/integrations/blackbox-config.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/blackbox-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/blackbox-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/blackbox-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/blackbox-config/ -description: Learn about blackbox_config -title: blackbox_config ---- - -# blackbox_config - -The `blackbox_config` block configures the `blackbox_exporter` -integration, which is an embedded version of -[`blackbox_exporter`](https://github.com/prometheus/blackbox_exporter). This allows -for the collection of blackbox metrics (probes) and exposing them as Prometheus metrics. 
- -## Quick configuration example - -To get started, define Blackbox targets in Grafana Agent's integration block: - -```yaml -metrics: - wal_directory: /tmp/wal -integrations: - blackbox: - enabled: true - blackbox_targets: - - name: example - address: http://example.com - module: http_2xx - blackbox_config: - modules: - http_2xx: - prober: http - timeout: 5s - http: - method: POST - headers: - Content-Type: application/json - body: '{}' - preferred_ip_protocol: "ip4" -``` - -Full reference of options: - -```yaml - # Enables the blackbox_exporter integration, allowing the Agent to automatically - # collect blackbox probe metrics from the configured targets - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the agent hostname - # and HTTP listen port, delimited by a colon. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the blackbox_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/blackbox/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # blackbox configuration file with custom modules. - # This field takes precedence over the config defined in the blackbox_config block. - # See https://github.com/prometheus/blackbox_exporter/blob/master/example.yml for more details on how to generate a custom blackbox.yml file. - [config_file: | default = ""] - - # Embedded blackbox configuration. You can specify your modules here instead of an external config file. - # config_file or blackbox_config must be specified. - # See https://github.com/prometheus/blackbox_exporter/blob/master/CONFIGURATION.md for more details on how to specify your blackbox modules. - blackbox_config: - [- ... ] - - # List of targets to probe - blackbox_targets: - [- ... ] - - # Option to configure blackbox_exporter. - # Represents the offset to subtract from timeout in seconds when probing targets.
- [probe_timeout_offset: | default = 0.5] -``` -## blackbox_target config - -```yaml - # Name of a blackbox_target - [name: ] - - # The address of the target to probe - [address: ] - - # Blackbox module to use to probe - [module: | default = ""] -``` diff --git a/docs/sources/static/configuration/integrations/cadvisor-config.md b/docs/sources/static/configuration/integrations/cadvisor-config.md deleted file mode 100644 index a4a33b4df2..0000000000 --- a/docs/sources/static/configuration/integrations/cadvisor-config.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/cadvisor-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/cadvisor-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/cadvisor-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/cadvisor-config/ -description: Learn about cadvisor_config -title: cadvisor_config ---- - -# cadvisor_config - -The `cadvisor_config` block configures the `cadvisor` integration, -which is an embedded version of -[`cadvisor`](https://github.com/google/cadvisor). This allows for the collection of container utilization metrics. - -The cAdvisor integration requires some broad privileged permissions to the host. Without these permissions, the metrics will not be accessible. This means that the agent must *also* have those elevated permissions. - -A good example of the required file and system permissions can be found in the docker run command published in the [cAdvisor docs](https://github.com/google/cadvisor#quick-start-running-cadvisor-in-a-docker-container). - -Full reference of options: - -```yaml - # Enables the cadvisor integration, allowing the Agent to automatically - # collect container utilization metrics. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - [instance: | default = ] - - # Automatically collect metrics from this integration. If disabled, - # the cadvisor integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/cadvisor/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # cAdvisor-specific configuration options - # - - # Convert container labels and environment variables into labels on Prometheus metrics for each container. If false, then the only metrics exported are container name, first alias, and image name. `.` aren't valid in Prometheus label names, so if there are any in the container label, they will be transformed to `_` when converted to the Prometheus label. - [store_container_labels: | default = true] - - # List of container labels to be converted to labels on Prometheus metrics for each container.
store_container_labels must be set to false for this to take effect. This must match the format of the container label, not the converted Prometheus label (`.` are converted to `_` in the Prometheus label). - allowlisted_container_labels: - [ - ] - - # List of environment variable keys, matched with the specified prefix, that need to be collected for containers. Only the containerd and Docker runtimes are supported for now. - env_metadata_allowlist: - [ - ] - - # List of cgroup path prefixes that need to be collected even when docker_only is specified. - raw_cgroup_prefix_allowlist: - [ - ] - - # Path to a JSON file containing configuration of perf events to measure. An empty value disables perf events measuring. - [perf_events_config: ] - - # resctrl mon groups updating interval. A zero value disables updating mon groups. - [resctrl_interval: | default = 0] - - # List of `metrics` to be disabled. If set, overrides the default disabled metrics. - disabled_metrics: - [ - ] - - # List of `metrics` to be enabled. If set, overrides disabled_metrics. - enabled_metrics: - [ - ] - - # Length of time to keep data stored in memory - [storage_duration: | default = "2m"] - - # Containerd endpoint - [containerd: | default = "/run/containerd/containerd.sock"] - - # Containerd namespace - [containerd_namespace: | default = "k8s.io"] - - # Docker endpoint - [docker: | default = "unix:///var/run/docker.sock"] - - # Use TLS to connect to docker - [docker_tls: | default = false] - - # Path to client certificate for TLS connection to docker - [docker_tls_cert: | default = "cert.pem"] - - # Path to private key for TLS connection to docker - [docker_tls_key: | default = "key.pem"] - - # Path to a trusted CA for TLS connection to docker - [docker_tls_ca: | default = "ca.pem"] - - # Only report docker containers in addition to root stats - [docker_only: | default = false] - - # Disable collecting root Cgroup stats - [disable_root_cgroup_stats: | default = false] -``` diff --git a/docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md b/docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md deleted file mode 100644 index 6495625b76..0000000000 --- a/docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md +++ /dev/null @@ -1,468 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/cloudwatch-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/cloudwatch-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/cloudwatch-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/cloudwatch-exporter-config/ -description: Learn about cloudwatch_exporter_config -title: cloudwatch_exporter_config ---- - -# cloudwatch_exporter_config - -## Overview - -The `cloudwatch_exporter_config` block configures the `cloudwatch_exporter` integration, which is an embedded version of -[`YACE`](https://github.com/nerdswords/yet-another-cloudwatch-exporter/). Use the `cloudwatch_exporter` to collect [AWS CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/WhatIsCloudWatch.html) metrics. - -This integration lets you scrape CloudWatch metrics in a set of configurations that we will call *jobs*. There are -two kinds of jobs: [`discovery`](#discovery_job) and [`static`](#static_job). - -## Authentication - -The agent must be running in an environment with access to AWS.
The exporter uses the [AWS SDK for Go](https://aws.github.io/aws-sdk-go-v2/docs/getting-started/) and -provides authentication via [AWS's default credential chain](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/#specifying-credentials). Regardless of the method used to acquire the credentials, -some permissions are needed for the exporter to work. -``` -"tag:GetResources", -"cloudwatch:GetMetricData", -"cloudwatch:GetMetricStatistics", -"cloudwatch:ListMetrics" -``` - -The following IAM permissions are required for the [Transit Gateway](https://aws.amazon.com/transit-gateway/) attachment (tgwa) metrics to work. -``` -"ec2:DescribeTags", -"ec2:DescribeInstances", -"ec2:DescribeRegions", -"ec2:DescribeTransitGateway*" -``` - -The following IAM permission is required to discover tagged [API Gateway](https://aws.amazon.com/es/api-gateway/) REST APIs: -``` -"apigateway:GET" -``` - -The following IAM permissions are required to discover tagged [Database Migration Service](https://aws.amazon.com/dms/) (DMS) replication instances and tasks: -``` -"dms:DescribeReplicationInstances", -"dms:DescribeReplicationTasks" -``` - -To use all of the integration features, use the following AWS IAM Policy: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "Stmt1674249227793", - "Action": [ - "tag:GetResources", - "cloudwatch:GetMetricData", - "cloudwatch:GetMetricStatistics", - "cloudwatch:ListMetrics", - "ec2:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeTransitGateway*", - "apigateway:GET", - "dms:DescribeReplicationInstances", - "dms:DescribeReplicationTasks" - ], - "Effect": "Allow", - "Resource": "*" - } - ] -} -``` - -## Configuration options - -Configuration reference: - -```yaml - # - # Common Integration Settings - # - - # Enables the cloudwatch_exporter integration, allowing the Agent to automatically - # collect CloudWatch metrics as configured. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is a hash of the whole integration configuration. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the cloudwatch_exporter integration is run but not scraped and thus not - # remote-written. Metrics for the integration are exposed at - # /integrations/cloudwatch_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected. Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # Required: AWS region to use when calling STS (https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html) for retrieving - # account information. - # Ex: us-east-2 - sts_region: - - # Optional: Disable use of FIPS endpoints. Set 'true' when running outside of USA regions. 
- [fips_disabled: | default = false] - - - # Instead of retrieving metrics on request, decoupled scraping retrieves the - # metrics on a schedule and returns the cached metrics. - decoupled_scraping: - # Enable decoupled scraping. - [enabled: | default = false ] - - # How often to scrape for CloudWatch metrics. - [scrape_interval: | default = "5m"] - - discovery: - - # Optional: List of tags (value) per service (key) to export in all metrics. For example, defining ["name", "type"] under - # AWS/EC2 will export the name and type tags and their values as labels in all metrics. Affects all discovery jobs. - # Ex: - # exported_tags: - # AWS/EC2: - # - name - exported_tags: - { : [ ] } - - # List of discovery jobs - jobs: [ ] - - # List of static jobs - static: [ ] - - # Optional: Enable debug logging on CloudWatch exporter internals. - [debug: | default = false] -``` - -### discovery_job - -A discovery job allows one to just define the AWS service to scrape, and the metrics under that service/namespace to retrieve. -The agent will find AWS resources in the specified service for which to scrape these metrics, label them appropriately, and -export them to Prometheus. For example, if we wanted to scrape CPU utilization and network traffic metrics from all AWS -EC2 instances: - -```yaml -sts_region: us-east-2 -discovery: - jobs: - - type: AWS/EC2 - regions: - - us-east-2 - nil_to_zero: true - metrics: - - name: CPUUtilization - period: 5m - statistics: - - Average - nil_to_zero: true - - name: NetworkPacketsIn - period: 5m - statistics: - - Average - nil_to_zero: true -``` - -Configuration reference: - -```yaml - # Required: List of AWS regions. - regions: [ ] - - # Optional: List of IAM roles to assume. Defaults to the AWS role configured in the environment. - roles: [ ] - - # Required: Cloudwatch service alias ("alb", "ec2", etc.) or namespace name ("AWS/EC2", "AWS/S3", etc.). See the section below for all - # supported values. - type: - - # Optional: List of `Key/Value` pairs to use for tag filtering (all must match). Value can be a regex. - search_tags: [ ] - - # Optional: Custom tags to be added as a list of `Key/Value` pairs. When exported to Prometheus format, the label name follows - # the following format: `custom_tag_{Key}`. - custom_tags: [ ] - - # Optional: List of metric dimensions to query. Before querying metric values, the total list of metrics will be filtered to only those that contain exactly this list of dimensions. An empty or undefined list results in all dimension combinations being included. - dimension_name_requirements: [ ] - - # Optional: Flag that controls if `NaN` metric values are converted to 0. Default `true`. This can be overridden in the config of each metric. - nil_to_zero: - - # Required: List of metric definitions to scrape. - metrics: [ ] -``` - -### static_job - -A static job allows one to scrape an individual CloudWatch metric. For that, metrics need to be fully qualified, specifying the following: -1. `namespace`: For example `AWS/EC2`, `AWS/EBS`, `CoolApp` if it were a custom metric, etc. -2. `dimensions`: CloudWatch identifies a metric by a set of dimensions. For example, all `AWS/EC2` metrics are identified by the `InstanceId` dimension. -3. `metrics`: Metric name and statistics.
- -For example, if one wants to scrape the same metrics in the discovery example, but for a specific AWS EC2 instance: - -```yaml -sts_region: us-east-2 -static: - - name: single_ec2_instance - regions: - - us-east-2 - namespace: AWS/EC2 - dimensions: - - name: InstanceId - value: i-0e43cee369aa44b52 - nil_to_zero: true - metrics: - - name: CPUUtilization - period: 5m - statistics: - - Average - nil_to_zero: true - - name: NetworkPacketsIn - period: 5m - statistics: - - Average - nil_to_zero: true -``` - -All dimensions need to be specified when scraping single metrics like the example above. For example, `AWS/Logs` metrics -require `Resource`, `Service`, `Class`, and `Type` dimensions to be specified. The same applies to CloudWatch custom metrics: -all dimensions attached to a metric when it is saved in CloudWatch are required. - -Configuration reference: - -```yaml - # Required: List of AWS regions. - regions: [ ] - - # Optional: List of IAM roles to assume. Defaults to the AWS role configured in the environment. - roles: [ ] - - # Required: Identifier of the static scraping job. When exported to Prometheus format corresponds to the `name` label. - name: - - # Required: CloudWatch namespace - namespace: - - # Required: CloudWatch metric dimensions as a list of Name/Value pairs. Must uniquely define a single metric. - dimensions: [ ] - - # Optional: Custom tags to be added as a list of Key/Value pairs. When exported, the label name follows the following format: - # `custom_tag_{Key}`. - custom_tags: [ ] - - # Optional: Flag that controls if `NaN` metric values are converted to 0. Default `true`. This can be overridden in the config of each metric. - nil_to_zero: - - # Required: List of metric definitions to scrape. - metrics: [ ] -``` - -### aws_role - -Represents an [AWS IAM Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html). Required when configuring a job. If omitted, -the AWS role that the credentials configured in the environment possess will be used. - -This is useful when scraping metrics from different AWS accounts with a single pair of credentials. In this case, a different role -is configured for the agent to assume prior to calling AWS APIs; therefore, the credentials configured in the system need -permission to assume the target role. See [this documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_permissions-to-switch.html) on how to configure this. - -```yaml - # Required: AWS IAM Role ARN the exporter should assume to perform AWS API calls. - role_arn: - - # Optional: External ID used when calling STS AssumeRole API. See https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html for details. - external_id: -``` - -### aws_dimension - -Represents an [AWS CloudWatch Dimension](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension). - -```yaml - name: - value: -``` - -### aws_tag - -Represents an [AWS Tag](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). - -```yaml - key: - value: -``` - -### metric - -Represents an AWS metric to scrape, under the context of a job. AWS does not keep a documentation page with all available metrics. -Follow [this guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/viewing_metrics_with_cloudwatch.html) on how to explore metrics, to easily -pick the ones you need. - -```yaml - # Required: CloudWatch metric name. - name: - - # Required: List of statistic types, e.g. "Minimum", "Maximum", etc. - statistics: [ ] - - # Optional: See the `period and length` section below. - period: [ | default = 5m ] - - # Optional: See the `period and length` section below. - length: [ | default = calculated based on `period` ] - - # Optional: Flag that controls if `NaN` metric values are converted to 0. - # When not set, the value defaults to the setting in the parent static or discovery block (`true` if not set in the parent block). - nil_to_zero: -``` - -### Period and length - -`period` controls the width of the time bucket used for aggregating metrics collected from CloudWatch. -`length` controls how far back in time CloudWatch metrics are considered during each agent scrape. -If both settings are configured, the time parameters when calling CloudWatch APIs work as follows: - -![](https://grafana.com/media/docs/agent/cloudwatch-period-and-length-time-model-2.png) - -As shown above, if there is a different `period` or `length` across multiple metrics under the same static or discovery job, -the minimum of all periods and the maximum of all lengths are used. - -On the other hand, if `length` is not configured, both period and length settings are calculated based on -the required `period` configuration attribute. - -If all metrics within a job (discovery or static) have the same `period` value configured, CloudWatch APIs will be -queried for metrics from the scrape time back to `period` seconds in the past. -The values of these metrics are exported to Prometheus. - -![](https://grafana.com/media/docs/agent/cloudwatch-single-period-time-model.png) - -On the other hand, if metrics with different `period`s are configured under an individual job, this works differently. -First, two variables are calculated aggregating all periods: `length`, taking the maximum value of all periods, and -the new `period` value, taking the minimum of all periods. Then, CloudWatch APIs will be requested for metrics from -`now - length` to `now`, aggregating each in samples for `period` seconds. For each metric, the most recent sample -is exported to Prometheus. - -![](https://grafana.com/media/docs/agent/cloudwatch-multiple-period-time-model.png) - -## Supported services in discovery jobs - -When configuring a discovery job, the `type` field of each `discovery_job` must match either the desired job namespace or alias.
- -- Namespace: `CWAgent` or Alias: `cwagent` -- Namespace: `AWS/Usage` or Alias: `usage` -- Namespace: `AWS/CertificateManager` or Alias: `acm` -- Namespace: `AWS/ACMPrivateCA` or Alias: `acm-pca` -- Namespace: `AmazonMWAA` or Alias: `airflow` -- Namespace: `AWS/MWAA` or Alias: `mwaa` -- Namespace: `AWS/ApplicationELB` or Alias: `alb` -- Namespace: `AWS/AppStream` or Alias: `appstream` -- Namespace: `AWS/Backup` or Alias: `backup` -- Namespace: `AWS/ApiGateway` or Alias: `apigateway` -- Namespace: `AWS/AmazonMQ` or Alias: `mq` -- Namespace: `AWS/AppSync` or Alias: `appsync` -- Namespace: `AWS/Athena` or Alias: `athena` -- Namespace: `AWS/AutoScaling` or Alias: `asg` -- Namespace: `AWS/ElasticBeanstalk` or Alias: `beanstalk` -- Namespace: `AWS/Billing` or Alias: `billing` -- Namespace: `AWS/Cassandra` or Alias: `cassandra` -- Namespace: `AWS/CloudFront` or Alias: `cloudfront` -- Namespace: `AWS/Cognito` or Alias: `cognito-idp` -- Namespace: `AWS/DMS` or Alias: `dms` -- Namespace: `AWS/DDoSProtection` or Alias: `shield` -- Namespace: `AWS/DocDB` or Alias: `docdb` -- Namespace: `AWS/DX` or Alias: `dx` -- Namespace: `AWS/DynamoDB` or Alias: `dynamodb` -- Namespace: `AWS/EBS` or Alias: `ebs` -- Namespace: `AWS/ElastiCache` or Alias: `ec` -- Namespace: `AWS/MemoryDB` or Alias: `memorydb` -- Namespace: `AWS/EC2` or Alias: `ec2` -- Namespace: `AWS/EC2Spot` or Alias: `ec2Spot` -- Namespace: `AWS/ECS` or Alias: `ecs-svc` -- Namespace: `ECS/ContainerInsights` or Alias: `ecs-containerinsights` -- Namespace: `AWS/EFS` or Alias: `efs` -- Namespace: `AWS/ELB` or Alias: `elb` -- Namespace: `AWS/ElasticMapReduce` or Alias: `emr` -- Namespace: `AWS/EMRServerless` or Alias: `emr-serverless` -- Namespace: `AWS/ES` or Alias: `es` -- Namespace: `AWS/Firehose` or Alias: `firehose` -- Namespace: `AWS/FSx` or Alias: `fsx` -- Namespace: `AWS/GameLift` or Alias: `gamelift` -- Namespace: `AWS/GlobalAccelerator` or Alias: `ga` -- Namespace: `Glue` or Alias: `glue` -- Namespace: `AWS/IoT` or Alias: `iot` -- Namespace: `AWS/Kafka` or Alias: `kafka` -- Namespace: `AWS/KafkaConnect` or Alias: `kafkaconnect` -- Namespace: `AWS/Kinesis` or Alias: `kinesis` -- Namespace: `AWS/KinesisAnalytics` or Alias: `kinesis-analytics` -- Namespace: `AWS/Lambda` or Alias: `lambda` -- Namespace: `AWS/MediaConnect` or Alias: `mediaconnect` -- Namespace: `AWS/MediaConvert` or Alias: `mediaconvert` -- Namespace: `AWS/MediaLive` or Alias: `medialive` -- Namespace: `AWS/MediaTailor` or Alias: `mediatailor` -- Namespace: `AWS/Neptune` or Alias: `neptune` -- Namespace: `AWS/NetworkFirewall` or Alias: `nfw` -- Namespace: `AWS/NATGateway` or Alias: `ngw` -- Namespace: `AWS/NetworkELB` or Alias: `nlb` -- Namespace: `AWS/PrivateLinkEndpoints` or Alias: `vpc-endpoint` -- Namespace: `AWS/PrivateLinkServices` or Alias: `vpc-endpoint-service` -- Namespace: `AWS/Prometheus` or Alias: `amp` -- Namespace: `AWS/QLDB` or Alias: `qldb` -- Namespace: `AWS/RDS` or Alias: `rds` -- Namespace: `AWS/Redshift` or Alias: `redshift` -- Namespace: `AWS/Route53Resolver` or Alias: `route53-resolver` -- Namespace: `AWS/Route53` or Alias: `route53` -- Namespace: `AWS/S3` or Alias: `s3` -- Namespace: `AWS/SES` or Alias: `ses` -- Namespace: `AWS/States` or Alias: `sfn` -- Namespace: `AWS/SNS` or Alias: `sns` -- Namespace: `AWS/SQS` or Alias: `sqs` -- Namespace: `AWS/StorageGateway` or Alias: `storagegateway` -- Namespace: `AWS/TransitGateway` or Alias: `tgw` -- Namespace: `AWS/TrustedAdvisor` or Alias: `trustedadvisor` -- Namespace: `AWS/VPN` or Alias: `vpn` -- Namespace: 
`AWS/ClientVPN` or Alias: `clientvpn` -- Namespace: `AWS/WAFV2` or Alias: `wafv2` -- Namespace: `AWS/WorkSpaces` or Alias: `workspaces` -- Namespace: `AWS/AOSS` or Alias: `aoss` -- Namespace: `AWS/SageMaker` or Alias: `sagemaker` -- Namespace: `/aws/sagemaker/Endpoints` or Alias: `sagemaker-endpoints` -- Namespace: `/aws/sagemaker/TrainingJobs` or Alias: `sagemaker-training` -- Namespace: `/aws/sagemaker/ProcessingJobs` or Alias: `sagemaker-processing` -- Namespace: `/aws/sagemaker/TransformJobs` or Alias: `sagemaker-transform` -- Namespace: `/aws/sagemaker/InferenceRecommendationsJobs` or Alias: `sagemaker-inf-rec` -- Namespace: `AWS/Sagemaker/ModelBuildingPipeline` or Alias: `sagemaker-model-building-pipeline` - diff --git a/docs/sources/static/configuration/integrations/consul-exporter-config.md b/docs/sources/static/configuration/integrations/consul-exporter-config.md deleted file mode 100644 index 469afc264f..0000000000 --- a/docs/sources/static/configuration/integrations/consul-exporter-config.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/consul-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/consul-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/consul-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/consul-exporter-config/ -description: Learn about consul_exporter_config -title: consul_exporter_config ---- - -# consul_exporter_config - -The `consul_exporter_config` block configures the `consul_exporter` -integration, which is an embedded version of -[`consul_exporter`](https://github.com/prometheus/consul_exporter). This allows -for the collection of Consul metrics and for exposing them as Prometheus metrics. - -Full reference of options: - -```yaml - # Enables the consul_exporter integration, allowing the Agent to automatically - # collect metrics from the configured Consul server address - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the hostname portion - # of the server URL. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the consul_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/consul_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # Prefix from which to expose key/value pairs. - [kv_prefix: | default = ""] - - # Regex that determines which keys to expose. - [kv_filter: | default = ".*"] - - # Generate a health summary for each service instance.
Needs n+1 queries to - # collect all information. - [generate_health_summary: | default = true] - - # HTTP API address of a Consul server or agent. Prefix with https:// to - # connect using HTTPS. - [server: | default = "http://localhost:8500"] - - # Disable TLS host verification. - [insecure_skip_verify: | default = false] - - # File path to a PEM-encoded certificate authority used to validate the - # authenticity of a server certificate. - [ca_file: | default = ""] - - # File path to a PEM-encoded certificate used with the private key to verify - # the exporter's authenticity. - [cert_file: | default = ""] - - # File path to a PEM-encoded private key used with the certificate to verify - # the exporter's authenticity. - [key_file: | default = ""] - - # When provided, this overrides the hostname for the TLS certificate. It can - # be used to ensure that the certificate name matches the hostname we declare. - [server_name: | default = ""] - - # Timeout on HTTP requests to the Consul API. - [timeout: | default = "500ms"] - - # Limit the maximum number of concurrent requests to consul. 0 means no limit. - [concurrent_request_limit: | default = 0] - - # Allows any Consul server (non-leader) to service a read. - [allow_stale: | default = true] - - # Forces the read to be fully consistent. - [require_consistent: | default = false] -``` diff --git a/docs/sources/static/configuration/integrations/dnsmasq-exporter-config.md b/docs/sources/static/configuration/integrations/dnsmasq-exporter-config.md deleted file mode 100644 index fe38a827bf..0000000000 --- a/docs/sources/static/configuration/integrations/dnsmasq-exporter-config.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/dnsmasq-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/dnsmasq-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/dnsmasq-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/dnsmasq-exporter-config/ -description: Learn about dnsmasq_exporter_config -title: dnsmasq_exporter_config ---- - -# dnsmasq_exporter_config - -The `dnsmasq_exporter_config` block configures the `dnsmasq_exporter` integration, -which is an embedded version of -[`dnsmasq_exporter`](https://github.com/google/dnsmasq_exporter). This allows for -the collection of metrics from dnsmasq servers. - -Note that currently, an Agent can only collect metrics from a single dnsmasq -server. If you want to collect metrics from multiple servers, you can run -multiple Agents and add labels using `relabel_configs` to differentiate between -the servers: - -```yaml -dnsmasq_exporter: - enabled: true - dnsmasq_address: dnsmasq-a:53 - relabel_configs: - - source_labels: [__address__] - target_label: instance - replacement: dnsmasq-a -``` - -Full reference of options: - -```yaml - # Enables the dnsmasq_exporter integration, allowing the Agent to automatically - # collect metrics from the configured dnsmasq server address - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the dnsmasq_address - # value. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the dnsmasq_exporter integration will be run but not scraped and thus not - # remote-written.
Metrics for the integration will be exposed at - # /integrations/dnsmasq_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # Address of the dnsmasq server in host:port form. - [dnsmasq_address: | default = "localhost:53"] - - # Path to the dnsmasq leases file. If this file doesn't exist, scraping - # dnsmasq will fail with a warning log message. - [leases_path: | default = "/var/lib/misc/dnsmasq.leases"] - - # Expose dnsmasq leases as metrics (high cardinality). - [expose_leases: | default = false] -``` diff --git a/docs/sources/static/configuration/integrations/elasticsearch-exporter-config.md b/docs/sources/static/configuration/integrations/elasticsearch-exporter-config.md deleted file mode 100644 index 9e0f3ee0f8..0000000000 --- a/docs/sources/static/configuration/integrations/elasticsearch-exporter-config.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/elasticsearch-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/elasticsearch-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/elasticsearch-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/elasticsearch-exporter-config/ -description: Learn about elasticsearch_exporter_config -title: elasticsearch_exporter_config ---- - -# elasticsearch_exporter_config - -The `elasticsearch_exporter_config` block configures the `elasticsearch_exporter` integration, -which is an embedded version of -[`elasticsearch_exporter`](https://github.com/prometheus-community/elasticsearch_exporter). This allows for -the collection of metrics from Elasticsearch servers. - -Note that currently, an Agent can only collect metrics from a single Elasticsearch server. -However, the exporter is able to collect metrics from all nodes of the cluster through that configured server. - -We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory -security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/prometheus-community/elasticsearch_exporter#elasticsearch-7x-security-privileges). - -Full reference of options: - -```yaml - # Enables the elasticsearch_exporter integration, allowing the Agent to automatically - # collect metrics from the configured Elasticsearch server address - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the hostname portion - # of address. - [instance: ] - - # Automatically collect metrics from this integration.
If disabled, - # the elasticsearch_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/elasticsearch_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # HTTP API address of an Elasticsearch node. - [ address: | default = "http://localhost:9200" ] - - # Timeout for trying to get stats from Elasticsearch. - [ timeout: | default = "5s" ] - - # Export stats for all nodes in the cluster. If used, this flag will override the flag `node`. - [ all: ] - - # Name of the node whose metrics should be exposed. - [ node: ] - - # Export stats for indices in the cluster. - [ indices: ] - - # Export stats for settings of all indices of the cluster. - [ indices_settings: ] - - # Export stats for cluster settings. - [ cluster_settings: ] - - # Export stats for shards in the cluster (implies indices). - [ shards: ] - - # Export stats for the cluster snapshots. - [ snapshots: ] - - # Cluster info update interval for the cluster label. - [ clusterinfo_interval: | default = "5m" ] - - # Path to PEM file that contains trusted Certificate Authorities for the Elasticsearch connection. - [ ca: ] - - # Path to PEM file that contains the private key for client auth when connecting to Elasticsearch. - [ client_private_key: ] - - # Path to PEM file that contains the corresponding cert for the private key to connect to Elasticsearch. - [ client_cert: ] - - # Skip SSL verification when connecting to Elasticsearch. - [ ssl_skip_verify: ] - - # Include informational aliases metrics. - [ aliases: ] - - # Export stats for Data Streams. - [ data_stream: ] - - # Export stats for SLM (Snapshot Lifecycle Management). - [ slm: ] - - # Sets the `Authorization` header on every ES probe with the - # configured username and password. - # password and password_file are mutually exclusive.
- basic_auth: - [ username: ] - [ password: ] - [ password_file: ] -``` diff --git a/docs/sources/static/configuration/integrations/gcp-exporter-config.md b/docs/sources/static/configuration/integrations/gcp-exporter-config.md deleted file mode 100644 index 56ef46aa93..0000000000 --- a/docs/sources/static/configuration/integrations/gcp-exporter-config.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/gcp-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/gcp-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/gcp-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/gcp-exporter-config/ -description: Learn about gcp_exporter_config -title: gcp_exporter_config ---- - -# gcp_exporter_config - -## Overview -The `gcp_exporter_config` block configures the `gcp_exporter` integration, which is an embedded version of -[`stackdriver_exporter`](https://github.com/prometheus-community/stackdriver_exporter). This allows for the collection of -metrics data from [GCP Cloud Monitoring (formerly Stackdriver)](https://cloud.google.com/monitoring/docs). The exporter supports all metrics available via [GCP's monitoring API](https://cloud.google.com/monitoring/api/metrics_gcp). - -Metric names follow the template `stackdriver_<monitored_resource>_<metric_type_prefix>_<metric_type>`. - -The following example shows a load balancing metric: - -![gcp-exporter-config-metric-example](https://grafana.com/media/docs/agent/gcp-exporter-config-metric-example.png) - -The following list shows its attributes: \ -monitored_resource = `https_lb_rule`\ -metric_type_prefix = `loadbalancing.googleapis.com/`\ -metric_type = `https/backend_latencies` - -These attributes result in a final metric name of: -`stackdriver_https_lb_rule_loadbalancing_googleapis_com_https_backend_latencies` - -## Authentication - -Grafana Agent must be running in an environment with access to the GCP project it is scraping. The exporter -uses the Google Golang Client Library, which offers a variety of ways to [provide credentials](https://developers.google.com/identity/protocols/application-default-credentials). Choose the option that works best for you. - -After deciding how the Agent will obtain credentials, ensure the account is set up with the IAM role `roles/monitoring.viewer`. -Since the exporter gathers all of its data from [GCP monitoring APIs](https://cloud.google.com/monitoring/api/v3), this is the only permission needed. - -## Configuration reference - -```yaml - # - # Common Integration Settings - # - - # Enables the gcp_exporter integration, allowing the Agent to automatically collect or expose GCP metrics. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is self-scraped. Default is - # based on subscriptions and ResourceType being monitored. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, the exporter integration is run but not - # scraped and thus not remote-written. Metrics for the integration are exposed at - # /integrations/gcp_exporter/metrics and can be scraped by an external process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout.
- [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing series that you don't care about to be dropped - # from the integration. - metric_relabel_configs: - [ - ... ] - - # How frequently to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration - # - - # Required: Configure the GCP Project(s) to scrape for metrics. - project_ids: - [ - ... ] - - # Required: One or more values from the supported GCP Metrics (https://cloud.google.com/monitoring/api/metrics_gcp). - # These can be as targeted or loose as needed. - # Using pubsub metrics (https://cloud.google.com/monitoring/api/metrics_gcp#gcp-pubsub) as an example - # all metrics. - # - pubsub.googleapis.com/ - # all snapshot specific metrics - # - pubsub.googleapis.com/snapshot - # all snapshot specific metrics and a few subscription metrics - # - pubsub.googleapis.com/snapshot - # - pubsub.googleapis.com/subscription/num_undelivered_messages - # - pubsub.googleapis.com/subscription/oldest_unacked_message_age - metrics_prefixes: - [ - ... ] - - # Optional: Used to further refine the resources you would like to collect metrics from. - # The structure for these filters is <targeted_metric_prefix>:<filter_query>. - # The `targeted_metric_prefix` is used to ensure the filter is only applied to the metric_prefix(es) where it makes sense. - # It does not explicitly have to match a value from `metric_prefixes` but the `targeted_metric_prefix` must be at least a - # prefix to one or more `metric_prefixes`. - # Example: - # metrics_prefixes = pubsub.googleapis.com/snapshot, pubsub.googleapis.com/subscription/num_undelivered_messages - # targeted_metric_prefix options would be: - # pubsub.googleapis.com (apply to all defined prefixes) - # pubsub.googleapis.com/snapshot (apply to only snapshot metrics) - # pubsub.googleapis.com/subscription (apply to only subscription metrics) - # pubsub.googleapis.com/subscription/num_undelivered_messages (apply to only the specific subscription metric) - # The `filter_query` is applied to a final metrics API query when querying for metric data - # You can read more about the metric API filter options in GCP's documentation https://cloud.google.com/monitoring/api/v3/filters. - # The final query sent to the metrics API already includes filters for project and metric type. Each applicable `filter_query` - # is appended to the query with an AND. - extra_filters: - [ - ... ] - - # Optional: The time range used when querying for metrics. - # Most of the time the default works perfectly fine. Most documented metrics include a comment of the form - # `Sampled every X seconds. After sampling, data is not visible for up to Y seconds.` - # As long as your `request_interval` is >= `Y` you should have no issues. - # Consider using `ingest_delay` if you would like this to be done programmatically or are gathering slower moving metrics. - [request_interval: | default = "5m"] - - # Optional: When enabled this automatically adjusts the time range used when querying for metrics backwards based on - # the metadata GCP has published for how long the data can take to be ingested. You can see the values for this in - # documented metrics as `After sampling, data is not visible for up to Y seconds.` - # Since GCP's ingestion delay is an "at worst" value, this is off by default to ensure data is gathered as soon as it's available.
- [ingest_delay: | default = false] - - # Optional: When enabled, this offsets the time range used when querying for metrics by a set amount. - [request_offset: | default = "0s"] - - # Optional: When enabled, drops metrics from attached projects and only fetches metrics from the explicitly configured `project_ids`. - [drop_delegated_projects: | default = false] - - # Optional: Sets a timeout on the client used to make API calls to GCP. A single scrape can initiate numerous calls to - # GCP, so be mindful if you choose to override this value. - [gcp_client_timeout: | default = "15s"] -``` - -## Configuration Examples - -The following examples show working configurations. See the [Configuration Reference](#configuration-reference) for a full -overview of the configuration options and what they do. - -### Multiple prefixes -```yaml - gcp_exporter: - enabled: true - project_ids: - - - metrics_prefixes: - - run.googleapis.com/ - - cloudfunctions.googleapis.com/ - - compute.googleapis.com/nat - - logging.googleapis.com/billing - - logging.googleapis.com/exports - - serviceruntime.googleapis.com/quota/ - - storage.googleapis.com/ - - pubsub.googleapis.com/subscription -``` - -### Load balancing with a filter -```yaml - gcp_exporter: - enabled: true - project_ids: - - - metrics_prefixes: - - loadbalancing.googleapis.com - extra_filters: - - loadbalancing.googleapis.com:resource.labels.backend_target_name="sample-value" -``` - -### Subset of load balancing metrics with a filter -```yaml - gcp_exporter: - enabled: true - project_ids: - - - metrics_prefixes: - - loadbalancing.googleapis.com/https/request_bytes_count - - loadbalancing.googleapis.com/https/total_latencies - extra_filters: - - loadbalancing.googleapis.com:resource.labels.backend_target_name="sample-value" -``` diff --git a/docs/sources/static/configuration/integrations/github-exporter-config.md b/docs/sources/static/configuration/integrations/github-exporter-config.md deleted file mode 100644 index c1bbbfe0d0..0000000000 --- a/docs/sources/static/configuration/integrations/github-exporter-config.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/github-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/github-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/github-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/github-exporter-config/ -description: Learn about github_exporter_config -title: github_exporter_config ---- - -# github_exporter_config - -The `github_exporter_config` block configures the `github_exporter` integration, -which is an embedded version of -[`github_exporter`](https://github.com/githubexporter/github-exporter). This allows for the collection of metrics from the GitHub API. - -We strongly recommend that you configure a separate authentication token for the Agent, and give it only the strictly mandatory -security privileges necessary for monitoring your repositories, as per the [official documentation](https://docs.github.com/en/rest/reference/permissions-required-for-github-apps). -We also recommend that you use the `api_token_file` parameter to avoid setting the authentication token directly in the Agent config file. - -Full reference of options: - -```yaml - # Enables the github_exporter integration, allowing the Agent to automatically - # collect metrics for the specified GitHub objects.
- [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the hostname portion - # of api_url. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the github_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/github_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # The full URI of the GitHub API. - [api_url: | default = "https://api.github.com"] - - # A list of GitHub repositories for which to collect metrics. - repositories: - [ - ] - - # A list of GitHub organizations for which to collect metrics. - organizations: - [ - ] - - # A list of GitHub users for which to collect metrics. - users: - [ - ] - - # A GitHub authentication token that allows the API to be queried more often. - # Optional, but recommended. - [api_token: ] - - # A path to a file containing a GitHub authentication token that allows the - # API to be queried more often. If supplied, this supersedes `api_token`. - # Optional, but recommended. - [api_token_file: ] -``` diff --git a/docs/sources/static/configuration/integrations/integrations-next/_index.md b/docs/sources/static/configuration/integrations/integrations-next/_index.md deleted file mode 100644 index cfa54bfb9b..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/_index.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/integrations-next/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/ -description: Learn about integrations next -menuTitle: Integrations next -title: Integrations next (Experimental) -weight: 100 ---- - -# Integrations next (Experimental) - -Release v0.22.0 of Grafana Agent includes experimental support for a revamped -integrations subsystem. The integrations subsystem is the second oldest part of -Grafana Agent, and has started to feel out of place as we built out the -project. - -The revamped integrations subsystem can be enabled by passing -`integrations-next` to the `-enable-features` command line flag. Because it is an -experimental feature, there are no stability guarantees, and it may receive a -higher frequency of breaking changes than normal. - -The revamped integrations subsystem has the following benefits over the -original subsystem: - -* Integrations can opt in to supporting multiple instances.
For example, you - may now run any number of `redis_exporter` integrations, where before you - could only have one per agent. Integrations such as `node_exporter` still - only support a single instance, as it wouldn't make sense to have multiple - instances of those. - -* Autoscrape (previously called "self-scraping"), when enabled, now supports - sending metrics for an integration directly to a running metrics instance. - This allows you to configure an integration to send metrics to a specific Prometheus - remote_write endpoint. - -* A new service discovery HTTP API is included. This can be used with - Prometheus' [http_sd_config][http_sd_config]. The API returns extra labels - for integrations that previously were only available when autoscraping, such - as `agent_hostname`. - -* Integrations that aren't Prometheus exporters may now be added, such as - integrations that generate logs or traces. - -* Autoscrape, when enabled, now works completely in-memory without using the - network. - -[http_sd_config]: https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#http_sd_config - -## Config changes - -The revamp contains a number of breaking changes to the config. The schema of the -`integrations` key in the config file is now the following: - -```yaml -integrations: - # Controls settings for integrations that generate metrics. - metrics: - # Controls default settings for autoscrape. Individual instances of - # integrations inherit the defaults and may override them. - autoscrape: - # Enables autoscrape of integrations. - [enable: | default = true] - - # Specifies the metrics instance name to send metrics to. Instance - # names are located at metrics.configs[].name from the top-level config. - # The instance must exist. - # - # As it is common to use the name "default" for your primary instance, - # we assume the same here. - [metrics_instance: | default = "default"] - - # Autoscrape interval and timeout. Defaults are inherited from the global - # section of the top-level metrics config. - [scrape_interval: | default = ] - [scrape_timeout: | default = ] - - # Configs for integrations which do not support multiple instances. - [agent: ] - [cadvisor: ] - [node_exporter: ] - [process: ] - [statsd: ] - [windows: ] - [eventhandler: ] - [snmp: ] - [blackbox: ] - - # Configs for integrations that do support multiple instances. Note that - # these must be arrays. - consul_configs: - [- ...] - - dnsmasq_configs: - [- ...] - - elasticsearch_configs: - [- ...] - - github_configs: - [- ...] - - kafka_configs: - [- ...] - - memcached_configs: - [- ...] - - mongodb_configs: - [- ...] - - mssql_configs: - [- ...] - - mysql_configs: - [- ...] - - oracledb_configs: - [ - ...] - - postgres_configs: - [- ...] - - redis_configs: - [- ...] - - snowflake_configs: - [- ...] - - app_agent_receiver_configs: - [- ] - - apache_http_configs: - [- ] - - squid_configs: - [- ...] - - vsphere_configs: - [- ] - - gcp_configs: - [- ] - - azure_configs: - [- ] - - cloudwatch_configs: - [- ] -``` - -Note that most integrations are no longer configured with the `_exporter` name. -`node_exporter` is the only integration that keeps the `_exporter` name, due to its -popularity in the Prometheus ecosystem. - -## Integrations changes - -Integrations no longer support an `enabled` field; they are enabled by being -defined in the YAML. To disable an integration, comment it out or remove it.
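For example, here is a minimal sketch of the revamped schema. The Redis addresses are placeholders, and the exporter-specific field names follow the corresponding static-mode integration options:

```yaml
integrations:
  # Single-instance integration: enabled simply by being defined.
  node_exporter: {}

  # Multiple-instance integration: note the `_configs` array form.
  # Each entry is a separate instance with its own autoscrape target.
  redis_configs:
    - redis_addr: redis-a:6379
    - redis_addr: redis-b:6379
```

Because both `redis_configs` entries come from the same integration, each one must resolve to a unique `instance` value; here the differing addresses let a reasonable default be inferred.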
- -Metrics-based integrations now use this common set of options: - -```yaml -# Provide an explicit value to uniquely identify this instance of the -# integration. If not provided, a reasonable default will be inferred based -# on the integration. -# -# The value here must be unique across all instances of the same integration. -[instance: ] - -# Override autoscrape defaults for this integration. -autoscrape: - # Enables autoscrape of integrations. - [enable: | default = ] - - # Specifies the metrics instance name to send metrics to. - [metrics_instance: | default = ] - - # Relabel the autoscrape job. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration. - metric_relabel_configs: - [ - ... ] - - # Autoscrape interval and timeout. - [scrape_interval: | default = ] - [scrape_timeout: | default = ] - -# An optional extra set of labels to add to metrics from the integration target. These -# labels are only exposed via the integration service discovery HTTP API and -# added when autoscrape is used. They will not be found directly on the metrics -# page for an integration. -extra_labels: - [ : ... ] -``` - -The old set of common options has been removed and does not work when the revamp -is being used: - -```yaml -# OLD SCHEMA: NO LONGER SUPPORTED - -[enabled: | default = false] -[instance: ] -[scrape_integration: | default = ] -[scrape_interval: | default = ] -[scrape_timeout: | default = ] -[wal_truncate_frequency: | default = "60m"] -relabel_configs: - [- ...] -metric_relabel_configs: - [ - ...] -``` diff --git a/docs/sources/static/configuration/integrations/integrations-next/app-agent-receiver-config.md b/docs/sources/static/configuration/integrations/integrations-next/app-agent-receiver-config.md deleted file mode 100644 index 7fe049a493..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/app-agent-receiver-config.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -aliases: -- ../../../../configuration/integrations/integrations-next/app-agent-receiver-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/app-agent-receiver-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/app-agent-receiver-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/app-agent-receiver-config/ -description: Learn about app_agent_receiver_config next -title: app_agent_receiver_config next ---- - -# app_agent_receiver_config next - -The `app_agent_receiver_config` block configures the `app_agent_receiver` -integration. This integration exposes an HTTP endpoint that can receive telemetry -from the [Grafana Faro Web SDK](https://github.com/grafana/faro-web-sdk) -and forward it to logs, traces, or metrics backends. - -These are the options you have for configuring the app_agent_receiver integration. - -```yaml - autoscrape: - # Enables autoscrape of integrations. - [enable: | default = true] - - # Specifies the metrics instance name to send metrics to. Instance - # names are located at metrics.configs[].name from the top-level config. - # The instance must exist. - # - # As it is common to use the name "default" for your primary instance, - # we assume the same here. - [metrics_instance: | default = "default"] - - # Autoscrape interval and timeout. Defaults are inherited from the global - # section of the top-level metrics config.
    [scrape_interval: <duration>]
    [scrape_timeout: <duration>]

  # Integration instance name
  [instance: <string>]

  # Traces instance to send traces to. This assumes that you have a traces
  # config with such an instance defined
  [traces_instance: <string> | default = ""]

  # Logs instance to send logs and exceptions to. This assumes that you have a logs
  # config with the instance defined
  [logs_instance: <string> | default = ""]

  # Server config refers to the HTTP endpoint that the integration exposes
  # to receive data from.
  server:
    [host: <string> | default = "127.0.0.1"]
    [port: <int> | default = 12347]

  # Domains that data is sent from. For example "https://myapp.com"
  cors_allowed_origins:
    [- <string>]

  # Configure rate limiting. The HTTP server of the app agent receiver implements
  # a token bucket rate limiting algorithm in which we can configure the maximum RPS
  # as well as the burstiness (peaks of RPS)
  rate_limiting:
    [enabled: <boolean> | default = false]
    [rps: <float> | default = 100]
    [burstiness: <int> | default = 50]

  # If configured, incoming requests will be required to specify this key in the "x-api-key" header
  [api_key: <string>]

  # Max allowed payload size in bytes for the JSON payload. Internally the
  # Content-Length header is used to make this check
  [max_allowed_payload_size: <int> | default = 0]

  # Labels to set for the log entry.
  # If a value is specified, it will be used.
  # If the value is empty and the key exists in data, its value will be used from data
  logs_labels:
    [- <key>: <value>]

  # Timeout duration when sending an entry to Loki, milliseconds
  [logs_send_timeout: <duration> | default = 2s]

  # Sourcemap configuration for enabling stack trace transformation to original source locations
  [sourcemaps: <sourcemap_config>]
```

## sourcemap_config

```yaml
# Whether the agent should attempt to download compiled sources and source maps
[download: <boolean> | default = false]

# List of HTTP origins to download sourcemaps for
[download_origins: <[]string> | default = ["*"]]

# Timeout for downloading compiled sources and sourcemaps
[download_timeout: <duration> | default = "1s"]

# Sourcemap locations on the filesystem. Takes precedence over downloading if both methods are enabled
filesystem:
  [- <sourcemap_file_location>]
```

## sourcemap_file_location

```yaml
# Source URL prefix. If a minified source URL matches this prefix,
# a filepath is constructed by removing the prefix, prepending the path below and appending ".map".
#
# Example:
#
# minified_path_prefix = "https://my-app.dev/static/"
# path = "/var/app/static/"
#
# Then given source url "https://my-app.dev/static/foo.js"
# it will look for a sourcemap at "/var/app/static/foo.js.map"

minified_path_prefix: <string>

# Directory on the file system that contains source maps.
# See above for a more detailed explanation.
# It is parsed as a Go template. You can use "{{ .Release }}", which will be replaced with the
# app.release meta property.
path: <string>
```
diff --git a/docs/sources/static/configuration/integrations/integrations-next/blackbox-config.md b/docs/sources/static/configuration/integrations/integrations-next/blackbox-config.md
deleted file mode 100644
index fa99cf452f..0000000000
--- a/docs/sources/static/configuration/integrations/integrations-next/blackbox-config.md
+++ /dev/null
@@ -1,108 +0,0 @@
---
aliases:
- ../../../../configuration/integrations/integrations-next/blackbox-config/
- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/blackbox-config/
- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/blackbox-config/
canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/blackbox-config/
description: Learn about blackbox_config next
title: blackbox_config next
---

# blackbox_config next

The `blackbox_config` block configures the `blackbox_exporter`
integration, which is an embedded version of
[`blackbox_exporter`](https://github.com/prometheus/blackbox_exporter). This allows
for the collection of blackbox metrics (probes) and exposes them as Prometheus metrics.

## Quick configuration example

To get started, define Blackbox targets in Grafana Agent's integration block:

```yaml
metrics:
  wal_directory: /tmp/wal
  configs:
    - name: default
integrations:
  blackbox:
    blackbox_targets:
      - name: example
        address: http://example.com
        module: http_2xx
    blackbox_config:
      modules:
        http_2xx:
          prober: http
          timeout: 5s
          http:
            method: POST
            headers:
              Content-Type: application/json
            body: '{}'
            preferred_ip_protocol: "ip4"
```

Full reference of options:

```yaml
  # Provide an explicit value to uniquely identify this instance of the
  # integration. If not provided, a reasonable default will be inferred based
  # on the integration.
  #
  # The value here must be unique across all instances of the same integration.
  [instance: <string>]

  # Override autoscrape defaults for this integration.
  autoscrape:
    # Enables autoscrape of integrations.
    [enable: <boolean>]

    # Specifies the metrics instance name to send metrics to.
    [metrics_instance: <string>]

    # Autoscrape interval and timeout.
    [scrape_interval: <duration>]
    [scrape_timeout: <duration>]

  # An optional extra set of labels to add to metrics from the integration target. These
  # labels are only exposed via the integration service discovery HTTP API and
  # added when autoscrape is used. They will not be found directly on the metrics
  # page for an integration.
  extra_labels:
    [<labelname>: <labelvalue> ...]

  #
  # Exporter-specific configuration options
  #

  # blackbox configuration file with custom modules.
  # This field takes precedence over the config defined in the blackbox_config block.
  # See https://github.com/prometheus/blackbox_exporter/blob/master/example.yml for more details on how to generate a custom blackbox.yml file.
  [config_file: <string> | default = ""]

  # Embedded blackbox configuration. You can specify your modules here instead of an external config file.
  # See https://github.com/prometheus/blackbox_exporter/blob/master/CONFIGURATION.md for more details on how to specify your blackbox modules.
  blackbox_config:
    [- ... ]

  # List of targets to probe
  blackbox_targets:
    [- <blackbox_target> ... ]

  # Option to configure blackbox_exporter.
  # Represents the offset to subtract from timeout in seconds when probing targets.
- [probe_timeout_offset: | default = 0.5] -``` -## blackbox_target config - -```yaml - # Name of a blackbox_target - [name: ] - - # The address of the target to probe - [address: ] - - # Blackbox module to use to probe - [module: | default = ""] -``` diff --git a/docs/sources/static/configuration/integrations/integrations-next/eventhandler-config.md b/docs/sources/static/configuration/integrations/integrations-next/eventhandler-config.md deleted file mode 100644 index 0008f8c29d..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/eventhandler-config.md +++ /dev/null @@ -1,251 +0,0 @@ ---- -aliases: -- ../../../../configuration/integrations/integrations-next/eventhandler-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/eventhandler-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/eventhandler-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/eventhandler-config/ -description: Learn about eventhandler_config next -title: eventhandler_config next ---- - -# eventhandler_config next - -`eventhandler_config` configures the Kubernetes eventhandler integration. This -integration watches -[Event](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#event-v1-core) -resources in a Kubernetes cluster and forwards them as log entries to a Loki -sink. This integration depends on the experimental `integrations-next` feature -being enabled. - -On restart, the integration will look for a cache file (configured using -`cache_path`) that stores the last shipped event. This file is optional, and if -present, will be used to avoid double-shipping events if Agent or the -integration restarts. Kubernetes expires events after 60 minutes, so events -older than 60 minutes ago will never be shipped. - -To use the cache feature and maintain state in a Kubernetes environment, a -[StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) -must be used. Sample manifests are provided at the bottom of this doc. Please -adjust these according to your deployment preferences. You can also use a -Deployment, however the presence of the cache file will not be guaranteed and -the integration may ship duplicate entries in the event of a restart. Loki does -not yet support entry deduplication for the A->B->A case, so further -deduplication can only take place at the Grafana / front-end layer (Grafana -Explore does provide some deduplication features for Loki datasources). - -This integration uses Grafana Agent's embedded Loki-compatible `logs` subsystem -to ship entries, and a logs client and sink must be configured to use the -integration. Please see the sample Agent config below for an example -configuration. -[Pipelines](/docs/loki/latest/clients/promtail/pipelines/) -and relabel configuration are not yet supported, but these features will be -added soon. You should use the `job=eventhandler cluster=...` labels to query -your events (you can then use LogQL on top of the result set). - -If not running the integration in-cluster, the integration will use -`kubeconfig_path` to search for a valid Kubeconfig file, defaulting to a -kubeconfig in the user's home directory. If running in-cluster, the appropriate -`ServiceAccount` and Roles must be defined. Sample manifests are provided -below. 
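As a minimal sketch of the out-of-cluster case (the paths below are illustrative), it's enough to define the integration and, optionally, point it at an explicit kubeconfig:

```yaml
integrations:
  eventhandler:
    # Illustrative path; if omitted, an in-cluster config is tried first,
    # then a kubeconfig in the user's home directory.
    kubeconfig_path: /home/me/.kube/config
    cache_path: /var/lib/agent/eventhandler.cache
```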
- -Configuration reference: - -```yaml - # Provide an explicit value to uniquely identify this instance of the - # integration. If not provided, a reasonable default will be inferred based - # on the integration. - # - # The value here must be unique across all instances of the same integration. - [instance: ] - - ## Eventhandler hands watched events off to promtail using a promtail - ## client channel. This parameter configures how long to wait (in seconds) on the channel - ## before abandoning and moving on. - [send_timeout: | default = 60] - - ## Configures the path to a kubeconfig file. If not set, will fall back to using - ## an in-cluster config. If this fails, will fall back to checking the user's home - ## directory for a kubeconfig. - [kubeconfig_path: ] - - ## Path to a cache file that will store the last timestamp for a shipped event and events - ## shipped for that timestamp. Used to prevent double-shipping on integration restart. - [cache_path: | default = "./.eventcache/eventhandler.cache"] - - ## Name of logs subsystem instance to hand log entries off to. - [logs_instance: | default = "default"] - - ## K8s informer resync interval (seconds). You should use defaults here unless you are - ## familiar with K8s informers. - [informer_resync: | default = 120] - - ## The integration will flush the last event shipped out to disk every flush_interval seconds. - [flush_interval: | default = 10] - - ## If you would like to limit events to a given namespace, use this parameter. - [namespace: ] - - ## Configure extra labels to add to log lines - extra_labels: - { : } - - ## Format of the log line. The possible values are "logfmt" and "json". - ## The values are also LogQL parsers, which can be used for processing the logs - [log_format: | default = "logfmt"] -``` - -Sample agent config: - -```yaml -server: - log_level: info - -integrations: - eventhandler: - cache_path: "/etc/eventhandler/eventhandler.cache" - -logs: - configs: - - name: default - clients: - - url: https://logs-prod-us-central1.grafana.net/api/prom/push - basic_auth: - username: YOUR_LOKI_USER - password: YOUR_LOKI_API_KEY - external_labels: - cluster: "cloud" - positions: - filename: /tmp/positions0.yaml -``` - -Be sure to replace the Loki credentials with the appropriate values. - -Sample StatefulSet manifests. 
Please adjust these according to your needs: - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent-eventhandler - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent-eventhandler -rules: -- apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent-eventhandler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent-eventhandler -subjects: -- kind: ServiceAccount - name: grafana-agent-eventhandler - namespace: default ---- -apiVersion: v1 -kind: Service -metadata: - name: grafana-agent-eventhandler-svc -spec: - ports: - - port: 12345 - name: http-metrics - clusterIP: None - selector: - name: grafana-agent-eventhandler ---- -kind: ConfigMap -metadata: - name: grafana-agent-eventhandler - namespace: default -apiVersion: v1 -data: - agent.yaml: | - server: - log_level: info - - integrations: - eventhandler: - cache_path: "/etc/eventhandler/eventhandler.cache" - - logs: - configs: - - name: default - clients: - - url: https://logs-prod-us-central1.grafana.net/api/prom/push - basic_auth: - username: YOUR_LOKI_USER - password: YOUR_LOKI_API_KEY - external_labels: - cluster: "cloud" - positions: - filename: /tmp/positions0.yaml ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: grafana-agent-eventhandler - namespace: default -spec: - serviceName: "grafana-agent-eventhandler-svc" - selector: - matchLabels: - name: grafana-agent-eventhandler - replicas: 1 - template: - metadata: - labels: - name: grafana-agent-eventhandler - spec: - terminationGracePeriodSeconds: 10 - containers: - - name: agent - image: grafana/agent:main - imagePullPolicy: IfNotPresent - args: - - -config.file=/etc/agent/agent.yaml - - -enable-features=integrations-next - - -server.http.address=0.0.0.0:12345 - command: - - /bin/grafana-agent - env: - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - ports: - - containerPort: 12345 - name: http-metrics - volumeMounts: - - name: grafana-agent - mountPath: /etc/agent - - name: eventhandler-cache - mountPath: /etc/eventhandler - serviceAccount: grafana-agent-eventhandler - volumes: - - configMap: - name: grafana-agent-eventhandler - name: grafana-agent - volumeClaimTemplates: - - metadata: - name: eventhandler-cache - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi -``` diff --git a/docs/sources/static/configuration/integrations/integrations-next/snmp-config.md b/docs/sources/static/configuration/integrations/integrations-next/snmp-config.md deleted file mode 100644 index 2f331d6d69..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/snmp-config.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -aliases: -- ../../../../configuration/integrations/integrations-next/snmp-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/snmp-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/snmp-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/snmp-config/ -description: Learn about snmp config next -title: snmp config next ---- - -# snmp config next - -The `snmp` block configures the `snmp` integration, -which is an embedded version of -[`snmp_exporter`](https://github.com/prometheus/snmp_exporter). 
This allows for easy collection of SNMP metrics from network devices.

## Quick configuration example

To get started, define SNMP targets in Grafana Agent's integration block:

```yaml
metrics:
  wal_directory: /tmp/wal
integrations:
  snmp:
    snmp_targets:
      - name: network_switch_1
        address: 192.168.1.2
        module: if_mib
        walk_params: public
        auth: public
      - name: network_router_2
        address: 192.168.1.3
        module: mikrotik
        walk_params: private
        auth: private
    walk_params:
      private:
        retries: 2
      public:
        retries: 1
```

## Prometheus service discovery use case

If you need to scrape SNMP devices in a more dynamic environment, and can't define devices in `snmp_targets` because the targets change over time, you can use a service discovery approach. For instance, with [DNS discovery](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#dns_sd_config):

```yaml
metrics:
  wal_directory: /tmp/wal
  configs:
    - name: snmp_targets
      scrape_configs:
        - job_name: 'snmp'
          dns_sd_configs:
            - names:
                - switches.srv.example.org
                - routers.srv.example.org
          params:
            module: [if_mib]
            walk_params: [private]
            auth: [private]
          metrics_path: /integrations/snmp/metrics
          relabel_configs:
            - source_labels: [__address__]
              target_label: __param_target
            - source_labels: [__param_target]
              target_label: instance
            - replacement: 127.0.0.1:12345 # address must match grafana agent -server.http.address flag
              target_label: __address__
integrations:
  snmp:
    autoscrape:
      enable: false # set autoscrape to off
    walk_params:
      private:
        retries: 2
```

Full reference of options:

```yaml
  # Provide an explicit value to uniquely identify this instance of the
  # integration. If not provided, a reasonable default will be inferred based
  # on the integration.
  #
  # The value here must be unique across all instances of the same integration.
  [instance: <string>]

  # Override autoscrape defaults for this integration.
  autoscrape:
    # Enables autoscrape of integrations.
    [enable: <boolean>]

    # Specifies the metrics instance name to send metrics to.
    [metrics_instance: <string>]

    # Autoscrape interval and timeout.
    [scrape_interval: <duration>]
    [scrape_timeout: <duration>]

  # An optional extra set of labels to add to metrics from the integration target. These
  # labels are only exposed via the integration service discovery HTTP API and
  # added when autoscrape is used. They will not be found directly on the metrics
  # page for an integration.
  extra_labels:
    [<labelname>: <labelvalue> ...]

  #
  # Exporter-specific configuration options
  #

  # SNMP configuration file with custom modules.
  # See https://github.com/prometheus/snmp_exporter#generating-configuration for more details on how to generate a custom snmp.yml file.
  # If not defined, the embedded snmp_exporter default set of modules is used.
  [config_file: <string> | default = ""]

  # Embedded SNMP configuration. You can specify your modules here instead of an external config file.
  # See https://github.com/prometheus/snmp_exporter/tree/main#generating-configuration for more details on how to specify your SNMP modules.
  # If this and config_file are not defined, the embedded snmp_exporter default set of modules is used.
  snmp_config:
    [- ... ]

  # List of SNMP targets to poll
  snmp_targets:
    [- <snmp_target> ... ]

  # Map of SNMP connection profiles that can be used to override default SNMP settings.
  walk_params:
    [ <string>: <walk_param> ...
] - - -``` -## snmp_target config - -```yaml - # Name of a snmp_target - [name: ] - - # The address of SNMP device - [address: ] - - # SNMP module to use for polling - [module: | default = ""] - - # SNMP authentication profile to use - [auth: | default = ""] - - # walk_param config to use for this snmp_target - [walk_params: | default = ""] -``` - -## walk_param config - -```yaml - # How many objects to request with GET/GETBULK, defaults to 25. - # May need to be reduced for buggy devices. - [max_repetitions: | default = 25] - - # How many times to retry a failed request, defaults to 3. - [retries: | default = 3] - - # Timeout for each SNMP request, defaults to 5s. - [timeout: | default = 5s] -``` - - -## About SNMP modules - -SNMP module is the set of SNMP counters to be scraped together from the specific network device. - -SNMP modules available can be found in the embedded snmp.yml file [here](https://github.com/grafana/agent/blob/main/internal/static/integrations/snmp_exporter/common/snmp.yml). If not specified, `if_mib` module is used. - -If you need to use custom SNMP modules, you can [generate](https://github.com/prometheus/snmp_exporter#generating-configuration) your own snmp.yml file and specify it using `config_file` parameter. diff --git a/docs/sources/static/configuration/integrations/integrations-next/vsphere-config.md b/docs/sources/static/configuration/integrations/integrations-next/vsphere-config.md deleted file mode 100644 index b63523fe48..0000000000 --- a/docs/sources/static/configuration/integrations/integrations-next/vsphere-config.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -aliases: -- ../../../../configuration/integrations/integrations-next/vsphere-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/integrations-next/vsphere-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/integrations-next/vsphere-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/vsphere-config/ -description: Learn about vsphere_config next -menuTitle: vsphere_config next -title: vsphere config (beta) next ---- - -# vsphere config (beta) next - -The `vsphere_config` block configures the `vmware_exporter` integration, an embedded -version of [`vmware_exporter`](https://github.com/grafana/vmware_exporter), configured -to collect vSphere metrics. This integration is considered beta. - -Configuration reference: - -```yaml - autoscrape: - # Enables autoscrape of integrations. - [enable: | default = true] - - # Specifies the metrics instance name to send metrics to. Instance - # names are located at metrics.configs[].name from the top-level config. - # The instance must exist. - # - # As it is common to use the name "default" for your primary instance, - # we assume the same here. - [metrics_instance: | default = "default"] - - # Autoscrape interval and timeout. Defaults are inherited from the global - # section of the top-level metrics config. - [scrape_interval: | default = ] - [scrape_timeout: | default = ] - - # Integration instance name. This will default to the host:port of the configured - # vsphere_url. - [instance: | default = ] - - # Number of managed objects to include in each request to vsphere when - # fetching performance counters. - [request_chunk_size: | default = 256] - - # Number of concurrent requests to vsphere when fetching performance counters. - [collect_concurrency: | default = 8] - - # Interval on which to run vsphere managed object discovery. 
  # Setting this to a non-zero value will result in object discovery running in
  # the background. Each scrape will use object data gathered during the last
  # discovery. When this value is 0, object discovery occurs per scrape.
  [discovery_interval: <duration> | default = 0]
  [enable_exporter_metrics: <boolean> | default = true]

  # The URL of the vCenter SDK endpoint
  vsphere_url: <string>

  # vCenter username
  vsphere_user: <string>

  # vCenter password
  vsphere_password: <string>
```

## Quick configuration example

```yaml
integrations:
  vsphere_configs:
    - vsphere_url: https://127.0.0.1:8989/sdk
      vsphere_user: user
      vsphere_password: pass
      request_chunk_size: 256
      collect_concurrency: 8
      instance: vsphere
      autoscrape:
        enable: true
        metrics_instance: default

metrics:
  wal_directory: /tmp/grafana-agent-wal
server:
  log_level: debug
```
diff --git a/docs/sources/static/configuration/integrations/kafka-exporter-config.md b/docs/sources/static/configuration/integrations/kafka-exporter-config.md
deleted file mode 100644
index 14c8e5e990..0000000000
--- a/docs/sources/static/configuration/integrations/kafka-exporter-config.md
+++ /dev/null
@@ -1,126 +0,0 @@
---
aliases:
- ../../../configuration/integrations/kafka-exporter-config/
- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/kafka-exporter-config/
- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/kafka-exporter-config/
canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/kafka-exporter-config/
description: Learn about kafka_exporter_config
title: kafka_exporter_config
---

# kafka_exporter_config

The `kafka_exporter_config` block configures the `kafka_exporter`
integration, which is an embedded version of [`kafka_exporter`](https://github.com/davidmparrott/kafka_exporter).
This allows for the collection of Kafka lag metrics and exposes them as Prometheus metrics.

We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory
security privileges necessary for monitoring your node, as per the [documentation](https://github.com/lightbend/kafka-lag-exporter#required-permissions-for-kafka-acl).

Full reference of options:

```yaml
  # Enables the kafka_exporter integration, allowing the Agent to automatically
  # collect metrics from the configured Kafka server address
  [enabled: <boolean> | default = false]

  # Sets an explicit value for the instance label when the integration is
  # self-scraped. Overrides inferred values.
  #
  # The default value for this integration is inferred from the hostname
  # portion of the first kafka_uris value. If there is more than one string
  # in kafka_uris, the integration will fail to load and an instance value
  # must be manually provided.
  [instance: <string>]

  # Automatically collect metrics from this integration. If disabled,
  # the kafka_exporter integration will be run but not scraped and thus not
  # remote-written. Metrics for the integration will be exposed at
  # /integrations/kafka_exporter/metrics and can be scraped by an external
  # process.
  [scrape_integration: <boolean>]

  # How often should the metrics be collected? Defaults to
  # prometheus.global.scrape_interval.
  [scrape_interval: <duration>]

  # The timeout before considering the scrape a failure. Defaults to
  # prometheus.global.scrape_timeout.
  [scrape_timeout: <duration>]

  # Allows for relabeling labels on the target.
  relabel_configs:
    [- <relabel_config> ...
] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # Address array (host:port) of Kafka server - [kafka_uris: <[]string>] - - # Connect using SASL/PLAIN - [use_sasl: ] - - # Only set this to false if using a non-Kafka SASL proxy - [use_sasl_handshake: | default = true] - - # SASL user name - [sasl_username: ] - - # SASL user password - [sasl_password: ] - - # The SASL SCRAM SHA algorithm sha256 or sha512 as mechanism - [sasl_mechanism: ] - - # Connect using TLS - [use_tls: ] - - # The optional certificate authority file for TLS client authentication - [ca_file: ] - - # The optional certificate file for TLS client authentication - [cert_file: ] - - # The optional key file for TLS client authentication - [key_file: ] - - # If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - [insecure_skip_verify: ] - - # Kafka broker version - [kafka_version: | default = "2.0.0"] - - # if you need to use a group from zookeeper - [use_zookeeper_lag: ] - - # Address array (hosts) of zookeeper server. - [zookeeper_uris: <[]string>] - - # Kafka cluster name - [kafka_cluster_name: ] - - # Metadata refresh interval - [metadata_refresh_interval: | default = "1m"] - - # If true, all scrapes will trigger kafka operations otherwise, they will share results. WARN: This should be disabled on large clusters - [allow_concurrency: | default = true] - - # Maximum number of offsets to store in the interpolation table for a partition - [max_offsets: | default = 1000] - - # How frequently should the interpolation table be pruned, in seconds - [prune_interval_seconds: | default = 30] - - # Regex filter for topics to be monitored - [topics_filter_regex: | default = ".*"] - - # Regex filter for consumer groups to be monitored - [groups_filter_regex: | default = ".*"] - -``` diff --git a/docs/sources/static/configuration/integrations/memcached-exporter-config.md b/docs/sources/static/configuration/integrations/memcached-exporter-config.md deleted file mode 100644 index a8fe548f7c..0000000000 --- a/docs/sources/static/configuration/integrations/memcached-exporter-config.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/memcached-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/memcached-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/memcached-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/memcached-exporter-config/ -description: Learn about memcached_exporter_config -title: memcached_exporter_config ---- - -# memcached_exporter_config - -The `memcached_exporter_config` block configures the `memcached_exporter` -integration, which is an embedded version of -[`memcached_exporter`](https://github.com/prometheus/memcached_exporter). This -allows for the collection of metrics from memcached servers. - -Note that currently, an Agent can only collect metrics from a single memcached -server. 
If you want to collect metrics from multiple servers, you can run -multiple Agents and add labels using `relabel_configs` to differentiate between -the servers: - -```yaml -memcached_exporter: - enabled: true - memcached_address: memcached-a:53 - relabel_configs: - - source_labels: [__address__] - target_label: instance - replacement: memcached-a -``` - -Full reference of options: - -```yaml - # Enables the memcached_exporter integration, allowing the Agent to automatically - # collect system metrics from the configured memcached server address - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from - # memcached_address. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the memcached_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/memcached_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # Address of the memcached server in host:port form. - [memcached_address: | default = "localhost:53"] - - # Timeout for connecting to memcached. - [timeout: | default = "1s"] - - # TLS configuration for requests to the memcached server. - tls_config: - # The CA cert to use. - [ca: ] - # The client cert to use. - [cert: ] - # The client key to use. - [key: ] - - # Path to the CA cert file to use. - [ca_file: ] - # Path to the client cert file to use. - [cert_file: ] - # Path to the client key file to use. - [key_file: ] - - # Used to verify the hostname for the memcached server. - [server_name: ] - - # Disable memcached server certificate validation. - [insecure_skip_verify: | default = false] - - # Minimum TLS version. - [min_version: ] - # Maximum TLS version. 
[max_version: <string>]
```
diff --git a/docs/sources/static/configuration/integrations/mongodb_exporter-config.md b/docs/sources/static/configuration/integrations/mongodb_exporter-config.md
deleted file mode 100644
index 4ed4b14b2b..0000000000
--- a/docs/sources/static/configuration/integrations/mongodb_exporter-config.md
+++ /dev/null
@@ -1,92 +0,0 @@
---
aliases:
- ../../../configuration/integrations/mongodb_exporter-config/
- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/mongodb_exporter-config/
- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/mongodb_exporter-config/
canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/mongodb_exporter-config/
description: Learn about mongodb_exporter_config
title: mongodb_exporter_config
---

# mongodb_exporter_config

The `mongodb_exporter_config` block configures the `mongodb_exporter` integration, which is an embedded version of Percona's [`mongodb_exporter`](https://github.com/percona/mongodb_exporter).

In order for this integration to work properly, you have to connect each node of your MongoDB cluster to an Agent instance.
That's because this exporter does not collect metrics from multiple nodes.
Additionally, you need to define two custom labels for your metrics using `relabel_configs`.
The first one is `service_name`, which is how you identify this node in your cluster (example: ReplicaSet1-Node1).
The second one is `mongodb_cluster`, which is the name of your MongoDB cluster, and must be set to the same value for all nodes composing the cluster (example: prod-cluster).
Here's an example:

```yaml
relabel_configs:
  - source_labels: [__address__]
    target_label: service_name
    replacement: 'replicaset1-node1'
  - source_labels: [__address__]
    target_label: mongodb_cluster
    replacement: 'prod-cluster'
```

We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory
security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/percona/mongodb_exporter#permissions).

Besides that, there's not much to configure. Please refer to the full reference of options:

```yaml
  # Enables the mongodb_exporter integration
  [enabled: <boolean> | default = false]

  # Sets an explicit value for the instance label when the integration is
  # self-scraped. Overrides inferred values.
  #
  # The default value for this integration is inferred from the hostname
  # portion of the mongodb_uri field.
  [instance: <string>]

  # Automatically collect metrics from this integration. If disabled,
  # the mongodb_exporter integration will be run but not scraped and thus not
  # remote-written. Metrics for the integration will be exposed at
  # /integrations/mongodb_exporter/metrics and can be scraped by an external
  # process.
  [scrape_integration: <boolean>]

  # How often should the metrics be collected? Defaults to
  # metrics.global.scrape_interval.
  [scrape_interval: <duration>]

  # The timeout before considering the scrape a failure. Defaults to
  # metrics.global.scrape_timeout.
  [scrape_timeout: <duration>]

  # Allows for relabeling labels on the target.
  relabel_configs:
    [- <relabel_config> ...]

  # Relabel metrics coming from the integration, letting you drop series
  # from the integration that you don't care about.
  metric_relabel_configs:
    [- <relabel_config> ...]

  # How frequently to truncate the WAL for this integration.
- [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # MongoDB node connection URL, which must be in the [`Standard Connection String Format`](https://docs.mongodb.com/manual/reference/connection-string/#std-label-connections-standard-connection-string-format) - [mongodb_uri: ] - - # Whether or not a direct connect should be made. Direct connections are not valid if multiple hosts are specified or an SRV URI is used - [direct_connect: | default = true] - - # Enable autodiscover collections - [discovering_mode: | default = false] - - # Path to the file having Prometheus TLS config for basic auth. Only enable if you want to use TLS based authentication. - [tls_basic_auth_config_path: | default = ""] -``` - -For `tls_basic_auth_config_path`, check [`tls_config`](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config) for reference on the file format to be used. diff --git a/docs/sources/static/configuration/integrations/mssql-config.md b/docs/sources/static/configuration/integrations/mssql-config.md deleted file mode 100644 index 9152414c4f..0000000000 --- a/docs/sources/static/configuration/integrations/mssql-config.md +++ /dev/null @@ -1,333 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/mssql-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/mssql-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/mssql-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/mssql-config/ -description: Learn about mssql_config -title: mssql_config ---- - -# mssql_config - -The `mssql_config` block configures the `mssql` integration, an embedded version of [`sql_exporter`](https://github.com/burningalchemist/sql_exporter) that lets you collect [Microsoft SQL Server](https://www.microsoft.com/en-us/sql-server) metrics. - -It is recommended that you have a dedicated user set up for monitoring an mssql instance. -The user for monitoring must have the following grants in order to populate the metrics: -``` -GRANT VIEW ANY DEFINITION TO -GRANT VIEW SERVER STATE TO -``` - -## Quick configuration example - -To get started, define the MSSQL connection string in Grafana Agent's integration block: - -```yaml -metrics: - wal_directory: /tmp/wal -integrations: - mssql: - enabled: true - connection_string: "sqlserver://[user]:[pass]@localhost:1433" -``` - -Full reference of options: - -```yaml - # Enables the MSSQL integration, allowing the Agent to automatically - # collect metrics for the specified MSSQL instance. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is the host:port of the provided connection_string. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the MSSQL integration is run but not scraped and thus not - # remote-written. Metrics for the integration are exposed at - # /integrations/mssql/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. 
  relabel_configs:
    [- <relabel_config> ...]

  # Relabel metrics coming from the integration, letting you drop series
  # that you don't care about from the integration.
  metric_relabel_configs:
    [- <relabel_config> ...]

  # How frequently the WAL is truncated for this integration.
  [wal_truncate_frequency: <duration> | default = "60m"]

  #
  # Exporter-specific configuration options
  #

  # The connection string to use to connect to the MSSQL instance.
  # It is specified in the form of: "sqlserver://<USERNAME>:<PASSWORD>@<HOST>:<PORT>"
  connection_string: <string>

  # The maximum number of open database connections to the MSSQL instance.
  [max_open_connections: <int> | default = 3]

  # The maximum number of idle database connections to the MSSQL instance.
  [max_idle_connections: <int> | default = 3]

  # The timeout for scraping metrics from the MSSQL instance.
  [timeout: <duration> | default = "10s"]

  # Embedded MSSQL query configuration for specifying custom MSSQL Prometheus metrics.
  # See https://github.com/burningalchemist/sql_exporter#collectors for more details on how to specify your metric configurations.
  query_config:
    [- ... ]
```

### Authentication

By default, the `USERNAME` and `PASSWORD` used within the `connection_string` argument correspond to a SQL Server username and password.

If Grafana Agent is running in the same Windows domain as the SQL Server, you can use the parameter `authenticator=winsspi` within the `connection_string` to authenticate without any additional credentials.

```conn
sqlserver://@<HOST>:<PORT>?authenticator=winsspi
```

If you want to use Windows credentials to authenticate, instead of SQL Server credentials, you can use the parameter `authenticator=ntlm` within the `connection_string`.
The `USERNAME` and `PASSWORD` then correspond to a Windows username and password.
The Windows domain may need to be prefixed to the username with a trailing `\`.

```conn
sqlserver://<USERNAME>:<PASSWORD>@<HOST>:<PORT>?authenticator=ntlm
```

## Custom metrics

You can use the optional `query_config` parameter to retrieve custom Prometheus metrics for an MSSQL instance.

If this is defined, the new configuration will be used to query your MSSQL instance and create whatever Prometheus metrics are defined.
If you want additional metrics on top of the default metrics, the default configuration must be used as a base.

The default configuration used by this integration is as follows:
```
collector_name: mssql_standard

metrics:
  - metric_name: mssql_local_time_seconds
    type: gauge
    help: 'Local time in seconds since epoch (Unix time).'
    values: [unix_time]
    query: |
      SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time
  - metric_name: mssql_connections
    type: gauge
    help: 'Number of active connections.'
    key_labels:
      - db
    values: [count]
    query: |
      SELECT DB_NAME(sp.dbid) AS db, COUNT(sp.spid) AS count
      FROM sys.sysprocesses sp
      GROUP BY DB_NAME(sp.dbid)
  #
  # Collected from sys.dm_os_performance_counters
  #
  - metric_name: mssql_deadlocks_total
    type: counter
    help: 'Number of lock requests that resulted in a deadlock.'
    values: [cntr_value]
    query: |
      SELECT cntr_value
      FROM sys.dm_os_performance_counters WITH (NOLOCK)
      WHERE counter_name = 'Number of Deadlocks/sec' AND instance_name = '_Total'
  - metric_name: mssql_user_errors_total
    type: counter
    help: 'Number of user errors.'
- values: [cntr_value] - query: | - SELECT cntr_value - FROM sys.dm_os_performance_counters WITH (NOLOCK) - WHERE counter_name = 'Errors/sec' AND instance_name = 'User Errors' - - metric_name: mssql_kill_connection_errors_total - type: counter - help: 'Number of severe errors that caused SQL Server to kill the connection.' - values: [cntr_value] - query: | - SELECT cntr_value - FROM sys.dm_os_performance_counters WITH (NOLOCK) - WHERE counter_name = 'Errors/sec' AND instance_name = 'Kill Connection Errors' - - metric_name: mssql_page_life_expectancy_seconds - type: gauge - help: 'The minimum number of seconds a page will stay in the buffer pool on this node without references.' - values: [cntr_value] - query: | - SELECT top(1) cntr_value - FROM sys.dm_os_performance_counters WITH (NOLOCK) - WHERE counter_name = 'Page life expectancy' - - metric_name: mssql_batch_requests_total - type: counter - help: 'Number of command batches received.' - values: [cntr_value] - query: | - SELECT cntr_value - FROM sys.dm_os_performance_counters WITH (NOLOCK) - WHERE counter_name = 'Batch Requests/sec' - - metric_name: mssql_log_growths_total - type: counter - help: 'Number of times the transaction log has been expanded, per database.' - key_labels: - - db - values: [cntr_value] - query: | - SELECT rtrim(instance_name) AS db, cntr_value - FROM sys.dm_os_performance_counters WITH (NOLOCK) - WHERE counter_name = 'Log Growths' AND instance_name <> '_Total' - - metric_name: mssql_buffer_cache_hit_ratio - type: gauge - help: 'Ratio of requests that hit the buffer cache' - values: [BufferCacheHitRatio] - query: | - SELECT (a.cntr_value * 1.0 / b.cntr_value) * 100.0 as BufferCacheHitRatio - FROM sys.dm_os_performance_counters a - JOIN (SELECT cntr_value, OBJECT_NAME - FROM sys.dm_os_performance_counters - WHERE counter_name = 'Buffer cache hit ratio base' - AND OBJECT_NAME = 'SQLServer:Buffer Manager') b ON a.OBJECT_NAME = b.OBJECT_NAME - WHERE a.counter_name = 'Buffer cache hit ratio' - AND a.OBJECT_NAME = 'SQLServer:Buffer Manager' - - - metric_name: mssql_checkpoint_pages_sec - type: gauge - help: 'Checkpoint Pages Per Second' - values: [cntr_value] - query: | - SELECT cntr_value - FROM sys.dm_os_performance_counters - WHERE [counter_name] = 'Checkpoint pages/sec' - # - # Collected from sys.dm_io_virtual_file_stats - # - - metric_name: mssql_io_stall_seconds_total - type: counter - help: 'Stall time in seconds per database and I/O operation.' - key_labels: - - db - value_label: operation - values: - - read - - write - query_ref: mssql_io_stall - - # - # Collected from sys.dm_os_process_memory - # - - metric_name: mssql_resident_memory_bytes - type: gauge - help: 'SQL Server resident memory size (AKA working set).' - values: [resident_memory_bytes] - query_ref: mssql_process_memory - - - metric_name: mssql_virtual_memory_bytes - type: gauge - help: 'SQL Server committed virtual memory size.' - values: [virtual_memory_bytes] - query_ref: mssql_process_memory - - - metric_name: mssql_available_commit_memory_bytes - type: gauge - help: 'SQL Server available to be committed memory size.' - values: [available_commit_limit_bytes] - query_ref: mssql_process_memory - - - metric_name: mssql_memory_utilization_percentage - type: gauge - help: 'The percentage of committed memory that is in the working set.' 
- values: [memory_utilization_percentage] - query_ref: mssql_process_memory - - - metric_name: mssql_page_fault_count_total - type: counter - help: 'The number of page faults that were incurred by the SQL Server process.' - values: [page_fault_count] - query_ref: mssql_process_memory - - # - # Collected from sys.dm_os_sys_info - # - - metric_name: mssql_server_total_memory_bytes - type: gauge - help: 'SQL Server committed memory in the memory manager.' - values: [committed_memory_bytes] - query_ref: mssql_os_sys_info - - - metric_name: mssql_server_target_memory_bytes - type: gauge - help: 'SQL Server target committed memory set for the memory manager.' - values: [committed_memory_target_bytes] - query_ref: mssql_os_sys_info - - # - # Collected from sys.dm_os_sys_memory - # - - metric_name: mssql_os_memory - type: gauge - help: 'OS physical memory, used and available.' - value_label: 'state' - values: [used, available] - query: | - SELECT - (total_physical_memory_kb - available_physical_memory_kb) * 1024 AS used, - available_physical_memory_kb * 1024 AS available - FROM sys.dm_os_sys_memory - - metric_name: mssql_os_page_file - type: gauge - help: 'OS page file, used and available.' - value_label: 'state' - values: [used, available] - query: | - SELECT - (total_page_file_kb - available_page_file_kb) * 1024 AS used, - available_page_file_kb * 1024 AS available - FROM sys.dm_os_sys_memory -queries: - # Populates `mssql_io_stall` and `mssql_io_stall_total` - - query_name: mssql_io_stall - query: | - SELECT - cast(DB_Name(a.database_id) as varchar) AS [db], - sum(io_stall_read_ms) / 1000.0 AS [read], - sum(io_stall_write_ms) / 1000.0 AS [write] - FROM - sys.dm_io_virtual_file_stats(null, null) a - INNER JOIN sys.master_files b ON a.database_id = b.database_id AND a.file_id = b.file_id - GROUP BY a.database_id - # Populates `mssql_resident_memory_bytes`, `mssql_virtual_memory_bytes`, mssql_available_commit_memory_bytes, - # and `mssql_memory_utilization_percentage`, and `mssql_page_fault_count_total` - - query_name: mssql_process_memory - query: | - SELECT - physical_memory_in_use_kb * 1024 AS resident_memory_bytes, - virtual_address_space_committed_kb * 1024 AS virtual_memory_bytes, - available_commit_limit_kb * 1024 AS available_commit_limit_bytes, - memory_utilization_percentage, - page_fault_count - FROM sys.dm_os_process_memory - # Populates `mssql_server_total_memory_bytes` and `mssql_server_target_memory_bytes`. 
- - query_name: mssql_os_sys_info - query: | - SELECT - committed_kb * 1024 AS committed_memory_bytes, - committed_target_kb * 1024 AS committed_memory_target_bytes - FROM sys.dm_os_sys_info -``` diff --git a/docs/sources/static/configuration/integrations/mysqld-exporter-config.md b/docs/sources/static/configuration/integrations/mysqld-exporter-config.md deleted file mode 100644 index 8f266787ad..0000000000 --- a/docs/sources/static/configuration/integrations/mysqld-exporter-config.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/mysqld-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/mysqld-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/mysqld-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/mysqld-exporter-config/ -description: Learn about mysqld_exporter_config -title: mysqld_exporter_config ---- - -# mysqld_exporter_config - -The `mysqld_exporter_config` block configures the `mysqld_exporter` integration, -which is an embedded version of -[`mysqld_exporter`](https://github.com/prometheus/mysqld_exporter) -and allows for collection metrics from MySQL servers. - -Note that currently, an Agent can only collect metrics from a single MySQL -server. If you want to collect metrics from multiple servers, run multiple -Agents and add labels using `relabel_configs` to differentiate between the MySQL -servers: - -```yaml -mysqld_exporter: - enabled: true - data_source_name: root@(server-a:3306)/ - relabel_configs: - - source_labels: [__address__] - target_label: instance - replacement: server-a -``` - -We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory -security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/prometheus/mysqld_exporter#required-grants). - -Full reference of options: - -```yaml - # Enables the mysqld_exporter integration, allowing the Agent to collect - # metrics from a MySQL server. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is a truncated version of the - # connection DSN, containing only the server and db name. (Credentials - # are not included.) - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the mysqld_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/mysqld_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # Data Source Name specifies the MySQL server to connect to. 
  # This is REQUIRED but may also be specified by the MYSQLD_EXPORTER_DATA_SOURCE_NAME
  # environment variable. If neither is set, the integration will fail to
  # start.
  #
  # The format of this is specified here: https://github.com/go-sql-driver/mysql#dsn-data-source-name
  #
  # A working example value for a server with no required password
  # authentication is: "root@(localhost:3306)/"
  data_source_name: <string>

  # A list of collector names to enable on top of the default set.
  enable_collectors:
    [- <string>]
  # A list of collector names to disable from the default set.
  disable_collectors:
    [- <string>]
  # A list of collectors to run. Fully overrides the default set.
  set_collectors:
    [- <string>]

  # Set a lock_wait_timeout on the connection to avoid long metadata locking.
  [lock_wait_timeout: <int> | default = 2]
  # Add a log_slow_filter to avoid slow query logging of scrapes. NOT supported
  # by Oracle MySQL.
  [log_slow_filter: <boolean> | default = false]

  ## Collector-specific options

  # Minimum time a thread must be in each state to be counted.
  [info_schema_processlist_min_time: <int> | default = 0]
  # Enable collecting the number of processes by user.
  [info_schema_processlist_processes_by_user: <boolean> | default = true]
  # Enable collecting the number of processes by host.
  [info_schema_processlist_processes_by_host: <boolean> | default = true]
  # The list of databases to collect table stats for. * for all.
  [info_schema_tables_databases: <string> | default = "*"]
  # Limit the number of events statements digests by response time.
  [perf_schema_eventsstatements_limit: <int> | default = 250]
  # Limit how old the 'last_seen' events statements can be, in seconds.
  [perf_schema_eventsstatements_time_limit: <int> | default = 86400]
  # Maximum length of the normalized statement text.
  [perf_schema_eventsstatements_digest_text_limit: <int> | default = 120]
  # Regex file_name filter for performance_schema.file_summary_by_instance
  [perf_schema_file_instances_filter: <string> | default = ".*"]
  # Remove path prefix in performance_schema.file_summary_by_instance
  [perf_schema_file_instances_remove_prefix: <string> | default = "/var/lib/mysql"]
  # Remove instrument prefix in performance_schema.memory_summary_global_by_event_name
  [perf_schema_memory_events_remove_prefix: <string> | default = "memory/"]
  # Database from where to collect heartbeat data.
  [heartbeat_database: <string> | default = "heartbeat"]
  # Table from where to collect heartbeat data.
- [heartbeat_table: | default = "heartbeat"] - # Use UTC for timestamps of the current server (`pt-heartbeat` is called with `--utc`) - [heartbeat_utc: | default = false] - # Enable collecting user privileges from mysql.user - [mysql_user_privileges: | default = false] -``` - -The full list of collectors that are supported for `mysqld_exporter` is: - -| Name | Description | Enabled by default | -| ------------------------------------------------ | ----------- | ------------------ | -| auto_increment.columns | Collect auto_increment columns and max values from information_schema | no | -| binlog_size | Collect the current size of all registered binlog files | no | -| engine_innodb_status | Collect from SHOW ENGINE INNODB STATUS | no | -| engine_tokudb_status | Collect from SHOW ENGINE TOKUDB STATUS | no | -| global_status | Collect from SHOW GLOBAL STATUS | yes | -| global_variables | Collect from SHOW GLOBAL VARIABLES | yes | -| heartbeat | Collect from heartbeat | no | -| info_schema.clientstats | If running with userstat=1, enable to collect client statistics | no | -| info_schema.innodb_cmpmem | Collect metrics from information_schema.innodb_cmpmem | yes | -| info_schema.innodb_metrics | Collect metrics from information_schema.innodb_metrics | yes | -| info_schema.innodb_tablespaces | Collect metrics from information_schema.innodb_sys_tablespaces | no | -| info_schema.processlist | Collect current thread state counts from the information_schema.processlist | no | -| info_schema.query_response_time | Collect query response time distribution if query_response_time_stats is ON | yes | -| info_schema.replica_host | Collect metrics from information_schema.replica_host_status | no | -| info_schema.schemastats | If running with userstat=1, enable to collect schema statistics | no | -| info_schema.tables | Collect metrics from information_schema.tables | no | -| info_schema.tablestats | If running with userstat=1, enable to collect table statistics | no | -| info_schema.userstats | If running with userstat=1, enable to collect user statistics | no | -| mysql.user | Collect data from mysql.user | no | -| perf_schema.eventsstatements | Collect metrics from performance_schema.events_statements_summary_by_digest | no | -| perf_schema.eventsstatementssum | Collect metrics of grand sums from performance_schema.events_statements_summary_by_digest | no | -| perf_schema.eventswaits | Collect metrics from performance_schema.events_waits_summary_global_by_event_name | no | -| perf_schema.file_events | Collect metrics from performance_schema.file_summary_by_event_name | no | -| perf_schema.file_instances | Collect metrics from performance_schema.file_summary_by_instance | no | -| perf_schema.indexiowaits | Collect metrics from performance_schema.table_io_waits_summary_by_index_usage | no | -| perf_schema.memory_events | Collect metrics from performance_schema.memory_summary_global_by_event_name |no | -| perf_schema.replication_applier_status_by_worker | Collect metrics from performance_schema.replication_applier_status_by_worker | no | -| perf_schema.replication_group_member_stats | Collect metrics from performance_schema.replication_group_member_stats | no | -| perf_schema.replication_group_members | Collect metrics from performance_schema.replication_group_members | no | -| perf_schema.tableiowaits | Collect metrics from performance_schema.table_io_waits_summary_by_table | no | -| perf_schema.tablelocks | Collect metrics from performance_schema.table_lock_waits_summary_by_table | no | -| slave_hosts | 
Scrape information from 'SHOW SLAVE HOSTS' | no | -| slave_status | Scrape information from SHOW SLAVE STATUS | yes | diff --git a/docs/sources/static/configuration/integrations/node-exporter-config.md b/docs/sources/static/configuration/integrations/node-exporter-config.md deleted file mode 100644 index 9919464056..0000000000 --- a/docs/sources/static/configuration/integrations/node-exporter-config.md +++ /dev/null @@ -1,435 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/node-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/node-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/node-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/node-exporter-config/ -description: Learn about node_exporter_config -title: node_exporter_config ---- - -# node_exporter_config - -The `node_exporter_config` block configures the `node_exporter` integration, -which is an embedded version of -[`node_exporter`](https://github.com/prometheus/node_exporter) -and allows for collecting metrics from the UNIX system that `node_exporter` is -running on. It provides a significant amount of collectors that are responsible -for monitoring various aspects of the host system. - -Note that if running the Agent in a container, you will need to bind mount -folders from the host system so the integration can monitor them. You can use -the example below, making sure to replace `/path/to/config.yaml` with a path on -your host machine where an Agent configuration file is: - -``` -docker run \ - --net="host" \ - --pid="host" \ - --cap-add=SYS_TIME \ - -v "/:/host/root:ro,rslave" \ - -v "/sys:/host/sys:ro,rslave" \ - -v "/proc:/host/proc:ro,rslave" \ - -v /tmp/agent:/etc/agent \ - -v /path/to/config.yaml:/etc/agent-config/agent.yaml \ - grafana/agent:{{< param "AGENT_RELEASE" >}} \ - --config.file=/etc/agent-config/agent.yaml -``` - -Use this configuration file for testing out `node_exporter` support, replacing -the `remote_write` settings with settings appropriate for you: - -```yaml -server: - log_level: info - -metrics: - wal_directory: /tmp/agent - global: - scrape_interval: 60s - remote_write: - - url: https://prometheus-us-central1.grafana.net/api/prom/push - basic_auth: - username: user-id - password: api-token - -integrations: - node_exporter: - enabled: true - rootfs_path: /host/root - sysfs_path: /host/sys - procfs_path: /host/proc - udev_data_path: /host/root/run/udev/data -``` - -For running on Kubernetes, ensure to set the equivalent mounts and capabilities -there as well: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: agent -spec: - containers: - - image: {{< param "AGENT_RELEASE" >}} - name: agent - args: - - --config.file=/etc/agent-config/agent.yaml - securityContext: - capabilities: - add: ["SYS_TIME"] - privileged: true - runAsUser: 0 - volumeMounts: - - name: rootfs - mountPath: /host/root - readOnly: true - - name: sysfs - mountPath: /host/sys - readOnly: true - - name: procfs - mountPath: /host/proc - readOnly: true - hostPID: true - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - volumes: - - name: rootfs - hostPath: - path: / - - name: sysfs - hostPath: - path: /sys - - name: procfs - hostPath: - path: /proc -``` - -The manifest and Tanka configs provided by this repository do not have the -mounts or capabilities required for running this integration. 
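-
-If you only need a subset of this data, you can limit which collectors run.
-As a rough sketch (this snippet is illustrative rather than part of the
-reference below, and the collector names are examples you should adjust),
-an explicit collector set looks like this:
-
-```yaml
-integrations:
-  node_exporter:
-    enabled: true
-    # Run only these collectors; all others are disabled.
-    set_collectors:
-      - cpu
-      - meminfo
-      - filesystem
-```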
- -Some collectors only work on specific operating systems, documented in the -table below. Enabling a collector that is not supported by the operating system -the Agent is running on is a no-op. - -| Name | Description | OS | Enabled by default | -| ---------------- | ----------- | -- | ------------------ | -| arp | Exposes ARP statistics from /proc/net/arp. | Linux | yes | -| bcache | Exposes bcache statistics from /sys/fs/bcache. | Linux | yes | -| bonding | Exposes the number of configured and active slaves of Linux bonding interfaces. | Linux | yes | -| boottime | Exposes system boot time derived from the kern.boottime sysctl. | Darwin, Dragonfly, FreeBSD, NetBSD, OpenBSD, Solaris | yes | -| btrfs | Exposes statistics on btrfs. | Linux | yes | -| buddyinfo | Exposes statistics of memory fragments as reported by /proc/buddyinfo. | Linux | no | -| cgroups | Exposes number of active and enabled cgroups. | Linux | no | -| conntrack | Shows conntrack statistics (does nothing if no /proc/sys/net/netfilter/ present). | Linux | yes | -| cpu | Exposes CPU statistics. | Darwin, Dragonfly, FreeBSD, Linux, Solaris, NetBSD | yes | -| cpufreq | Exposes CPU frequency statistics. | Linux, Solaris | yes | -| devstat | Exposes device statistics. | Dragonfly, FreeBSD | no | -| diskstats | Exposes disk I/O statistics. | Darwin, Linux, OpenBSD | yes | -| dmi | Exposes DMI information. | Linux | yes | -| drbd | Exposes Distributed Replicated Block Device statistics (to version 8.4). | Linux | no | -| drm | Exposes GPU card info from /sys/class/drm/card?/device | Linux | no | -| edac | Exposes error detection and correction statistics. | Linux | yes | -| entropy | Exposes available entropy. | Linux | yes | -| ethtool | Exposes ethtool stats | Linux | no | -| exec | Exposes execution statistics. | Dragonfly, FreeBSD | yes | -| fibrechannel | Exposes FibreChannel statistics. | Linux | yes | -| filefd | Exposes file descriptor statistics from /proc/sys/fs/file-nr. | Linux | yes | -| filesystem | Exposes filesystem statistics, such as disk space used. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD | yes | -| hwmon | Exposes hardware monitoring and sensor data from /sys/class/hwmon. | Linux | yes | -| infiniband | Exposes network statistics specific to InfiniBand and Intel OmniPath configurations. | Linux | yes | -| interrupts | Exposes detailed interrupts statistics. | Linux, OpenBSD | no | -| ipvs | Exposes IPVS status from /proc/net/ip_vs and stats from /proc/net/ip_vs_stats. | Linux | yes | -| ksmd | Exposes kernel and system statistics from /sys/kernel/mm/ksm. | Linux | no | -| lnstat | Exposes Linux network cache stats | Linux | no | -| loadavg | Exposes load average. | Darwin, Dragonfly, FreeBSD, Linux, NetBSD, OpenBSD, Solaris | yes | -| logind | Exposes session counts from logind. | Linux | no | -| mdadm | Exposes statistics about devices in /proc/mdstat (does nothing if no /proc/mdstat present). | Linux | yes | -| meminfo | Exposes memory statistics. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD, NetBSD | yes | -| meminfo_numa | Exposes memory statistics from /proc/meminfo_numa. | Linux | no | -| mountstats | Exposes filesystem statistics from /proc/self/mountstats. Exposes detailed NFS client statistics. | Linux | no | -| netclass | Exposes network interface info from /sys/class/net. | Linux | yes | -| netisr | Exposes netisr statistics. | FreeBSD | yes | -| netdev | Exposes network interface statistics such as bytes transferred. 
| Darwin, Dragonfly, FreeBSD, Linux, OpenBSD | yes | -| netstat | Exposes network statistics from /proc/net/netstat. This is the same information as netstat -s. | Linux | yes | -| network_route | Exposes network route statistics. | Linux | no | -| nfs | Exposes NFS client statistics from /proc/net/rpc/nfs. This is the same information as nfsstat -c. | Linux | yes | -| nfsd | Exposes NFS kernel server statistics from /proc/net/rpc/nfsd. This is the same information as nfsstat -s. | Linux | yes | -| ntp | Exposes local NTP daemon health to check time. | any | no | -| nvme | Exposes NVMe statistics. | Linux | yes | -| os | Exposes os-release information. | Linux | yes | -| perf | Exposes perf based metrics (Warning: Metrics are dependent on kernel configuration and settings). | Linux | no | -| powersupplyclass | Collects information on power supplies. | any | yes | -| pressure | Exposes pressure stall statistics from /proc/pressure/. | Linux (kernel 4.20+ and/or CONFIG_PSI) | yes | -| processes | Exposes aggregate process statistics from /proc. | Linux | no | -| qdisc | Exposes queuing discipline statistics. | Linux | no | -| rapl | Exposes various statistics from /sys/class/powercap. | Linux | yes | -| runit | Exposes service status from runit. | any | no | -| schedstat | Exposes task scheduler statistics from /proc/schedstat. | Linux | yes | -| selinux | Exposes SELinux statistics. | Linux | yes | -| slabinfo | Exposes slab statistics from `/proc/slabinfo`. | Linux | no | -| softirqs | Exposes detailed softirq statistics from `/proc/softirqs`. | Linux | no | -| sockstat | Exposes various statistics from /proc/net/sockstat. | Linux | yes | -| softnet | Exposes statistics from /proc/net/softnet_stat. | Linux | yes | -| stat | Exposes various statistics from /proc/stat. This includes boot time, forks and interrupts. | Linux | yes | -| supervisord | Exposes service status from supervisord. | any | no | -| sysctl | Expose sysctl values from `/proc/sys`. | Linux | no | -| systemd | Exposes service and system status from systemd. | Linux | no | -| tapestats | Exposes tape device stats. | Linux | yes | -| tcpstat | Exposes TCP connection status information from /proc/net/tcp and /proc/net/tcp6. (Warning: the current version has potential performance issues in high load situations). | Linux | no | -| textfile | Collects metrics from files in a directory matching the filename pattern *.prom. The files must be using the text format defined here: https://prometheus.io/docs/instrumenting/exposition_formats/ | any | yes | -| thermal | Exposes thermal statistics. | Darwin | yes | -| thermal_zone | Exposes thermal zone & cooling device statistics from /sys/class/thermal. | Linux | yes | -| time | Exposes the current system time. | any | yes | -| timex | Exposes selected adjtimex(2) system call stats. | Linux | yes | -| udp_queues | Exposes UDP total lengths of the rx_queue and tx_queue from /proc/net/udp and /proc/net/udp6. | Linux | yes | -| uname | Exposes system information as provided by the uname system call. | Darwin, FreeBSD, Linux, OpenBSD, NetBSD | yes | -| vmstat | Exposes statistics from /proc/vmstat. | Linux | yes | -| wifi | Exposes WiFi device and station statistics. | Linux | no | -| xfs | Exposes XFS runtime statistics. | Linux (kernel 4.4+) | yes | -| zfs | Exposes ZFS performance statistics. | Linux, Solaris | yes | -| zoneinfo | Exposes zone stats. 
| Linux | no |
-
-```yaml
-  # Enables the node_exporter integration, allowing the Agent to automatically
-  # collect system metrics from the host UNIX system.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the agent hostname
-  # and HTTP listen port, delimited by a colon.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the node_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/node_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-  [ - <relabel_config> ... ]
-
-  # How frequently to truncate the WAL for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  # Monitor the exporter itself and include those metrics in the results.
-  [include_exporter_metrics: <boolean> | default = false]
-
-  # Optionally defines the list of enabled-by-default collectors.
-  # Anything not provided in the list below will be disabled by default,
-  # but the list requires at least one element to be treated as defined.
-  #
-  # This is useful if you have a very explicit set of collectors you wish
-  # to run.
-  set_collectors:
-  - [<string>]
-
-  # Additional collectors to enable on top of the default set of enabled
-  # collectors or on top of the list provided by set_collectors.
-  #
-  # This is useful if you have a few collectors you wish to run that are
-  # not enabled by default, but do not want to explicitly provide an entire
-  # list through set_collectors.
-  enable_collectors:
-  - [<string>]
-
-  # Additional collectors to disable on top of the default set of disabled
-  # collectors. Takes precedence over enable_collectors.
-  #
-  # This is useful if you have a few collectors you do not want to run that
-  # are enabled by default, but do not want to explicitly provide an entire
-  # list through set_collectors.
-  disable_collectors:
-  - [<string>]
-
-  # procfs mountpoint.
-  [procfs_path: <string> | default = "/proc"]
-
-  # sysfs mountpoint.
-  [sysfs_path: <string> | default = "/sys"]
-
-  # rootfs mountpoint. If running in docker, the root filesystem of the host
-  # machine should be mounted and this value should be changed to the mount
-  # directory.
-  [rootfs_path: <string> | default = "/"]
-
-  # udev data path needed for diskstats from Node exporter. When running
-  # in Kubernetes it should be set to /host/root/run/udev/data.
-  [udev_data_path: <string> | default = "/run/udev/data"]
-
-  # Expose expensive bcache priority stats.
-  [enable_bcache_priority_stats: <boolean>]
-
-  # Regexp of `bugs` field in cpu info to filter.
-  [cpu_bugs_include: <string>]
-
-  # Enable the node_cpu_guest_seconds_total metric.
-  [enable_cpu_guest_seconds_metric: <boolean> | default = true]
-
-  # Enable the cpu_info metric for the cpu collector.
-  [enable_cpu_info_metric: <boolean> | default = true]
-
-  # Regexp of `flags` field in cpu info to filter.
-  [cpu_flags_include: <string>]
-
-  # Regexp of devices to ignore for diskstats.
-  [diskstats_device_exclude: <string> | default = "^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$"]
-
-  # Regexp of devices to include for diskstats. If set, the
-  # diskstats_device_exclude field is ignored.
-  [diskstats_device_include: <string>]
-
-  # Regexp of ethtool devices to exclude (mutually exclusive with ethtool_device_include).
-  [ethtool_device_exclude: <string>]
-
-  # Regexp of ethtool devices to include (mutually exclusive with ethtool_device_exclude).
-  [ethtool_device_include: <string>]
-
-  # Regexp of ethtool stats to include.
-  [ethtool_metrics_include: <string> | default = ".*"]
-
-  # Regexp of mount points to ignore for filesystem collector.
-  [filesystem_mount_points_exclude: <string> | default = "^/(dev|proc|sys|var/lib/docker/.+)($|/)"]
-
-  # Regexp of filesystem types to ignore for filesystem collector.
-  [filesystem_fs_types_exclude: <string> | default = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"]
-
-  # How long to wait for a mount to respond before marking it as stale.
-  [filesystem_mount_timeout: <duration> | default = "5s"]
-
-  # Array of IPVS backend stats labels.
-  #
-  # The default is [local_address, local_port, remote_address, remote_port, proto, local_mark].
-  ipvs_backend_labels:
-  [- <string>]
-
-  # NTP server to use for ntp collector.
-  [ntp_server: <string> | default = "127.0.0.1"]
-
-  # NTP protocol version.
-  [ntp_protocol_version: <int> | default = 4]
-
-  # Certify that the server address is not a public ntp server.
-  [ntp_server_is_local: <boolean> | default = false]
-
-  # IP TTL to use while sending NTP query.
-  [ntp_ip_ttl: <int> | default = 1]
-
-  # Max accumulated distance to the root.
-  [ntp_max_distance: <duration> | default = "3466080us"]
-
-  # Offset between local clock and local ntpd time to tolerate.
-  [ntp_local_offset_tolerance: <duration> | default = "1ms"]
-
-  # Regexp of net devices to ignore for netclass collector.
-  [netclass_ignored_devices: <string> | default = "^$"]
-
-  # Ignore net devices with invalid speed values. This will default to true in
-  # node_exporter 2.0.
-  [netclass_ignore_invalid_speed_device: <boolean> | default = false]
-
-  # Enable collecting address-info for every device.
-  [netdev_address_info: <boolean>]
-
-  # Regexp of net devices to exclude (mutually exclusive with include).
-  [netdev_device_exclude: <string> | default = ""]
-
-  # Regexp of net devices to include (mutually exclusive with exclude).
-  [netdev_device_include: <string> | default = ""]
-
-  # Regexp of fields to return for netstat collector.
-  [netstat_fields: <string> | default = "^(.*_(InErrors|InErrs)|Ip_Forwarding|Ip(6|Ext)_(InOctets|OutOctets)|Icmp6?_(InMsgs|OutMsgs)|TcpExt_(Listen.*|Syncookies.*|TCPSynRetrans|TCPTimeouts)|Tcp_(ActiveOpens|InSegs|OutSegs|OutRsts|PassiveOpens|RetransSegs|CurrEstab)|Udp6?_(InDatagrams|OutDatagrams|NoPorts|RcvbufErrors|SndbufErrors))$"]
-
-  # List of CPUs from which perf metrics should be collected.
-  [perf_cpus: <string> | default = ""]
-
-  # Array of perf tracepoints that should be collected.
-  perf_tracepoint:
-  [- <string>]
-
-  # Disable perf hardware profilers.
-  [perf_disable_hardware_profilers: <boolean> | default = false]
-
-  # Perf hardware profilers that should be collected.
-  perf_hardware_profilers:
-  [- <string>]
-
-  # Disable perf software profilers.
-  [perf_disable_software_profilers: <boolean> | default = false]
-
-  # Perf software profilers that should be collected.
-  perf_software_profilers:
-  [- <string>]
-
-  # Disable perf cache profilers.
- [perf_disable_cache_profilers: | default = false] - - # Perf cache profilers that should be collected. - perf_cache_profilers: - [- ] - - # Regexp of power supplies to ignore for the powersupplyclass collector. - [powersupply_ignored_supplies: | default = "^$"] - - # Path to runit service directory. - [runit_service_dir: | default = "/etc/service"] - - # XML RPC endpoint for the supervisord collector. - # - # Setting SUPERVISORD_URL in the environment will override the default value. - # An explicit value in the YAML config takes precedence over the environment - # variable. - [supervisord_url: | default = "http://localhost:9001/RPC2"] - - # Numeric sysctl values to expose. - # For sysctl with multiple numeric values, - # an optional mapping can be given to expose each value as its own metric. - sysctl_include: - [- ] - - # String sysctl values to expose. - sysctl_include_info: - [- ] - - # Regexp of systemd units to include. Units must both match include and not - # match exclude to be collected. - [systemd_unit_include: | default = ".+"] - - # Regexp of systemd units to exclude. Units must both match include and not - # match exclude to be collected. - [systemd_unit_exclude: | default = ".+\\.(automount|device|mount|scope|slice)"] - - # Enables service unit tasks metrics unit_tasks_current and unit_tasks_max - [systemd_enable_task_metrics: | default = false] - - # Enables service unit metric service_restart_total - [systemd_enable_restarts_metrics: | default = false] - - # Enables service unit metric unit_start_time_seconds - [systemd_enable_start_time_metrics: | default = false] - - # Regexp of tapestats devices to ignore. - [tapestats_ignored_devices: | default = "^$"] - - # Directory to read *.prom files from for the textfile collector. - [textfile_directory: | default = ""] - - # Regexp of fields to return for the vmstat collector. - [vmstat_fields: | default = "^(oom_kill|pgpg|pswp|pg.*fault).*"] -``` diff --git a/docs/sources/static/configuration/integrations/oracledb-config.md b/docs/sources/static/configuration/integrations/oracledb-config.md deleted file mode 100644 index 2937c9f4d2..0000000000 --- a/docs/sources/static/configuration/integrations/oracledb-config.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/oracledb-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/oracledb-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/oracledb-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/oracledb-config/ -description: Learn about oracledb_config -title: oracledb_config ---- - -# oracledb_config - -The `oracledb_config` block configures the `oracledb` integration, -which is an embedded version of a forked version of the -[`oracledb_exporter`](https://github.com/observiq/oracledb_exporter). This allows the collection of third party [OracleDB](https://www.oracle.com/database/) metrics. - -Full reference of options: - -```yaml - # Enables the oracledb integration, allowing the Agent to automatically - # collect metrics for the specified oracledb instance. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is the configured host:port of the connection string. - [instance: ] - - # Automatically collect metrics from this integration. 
If disabled, - # the oracledb integration is run but not scraped and thus not - # remote-written. Metrics for the integration are exposed at - # /integrations/oracledb/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, lets you drop series - # that you don't care about from the integration. - metric_relabel_configs: - [ - ... ] - - # How frequently the WAL is truncated for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # The connection string used to connect to the OracleDB instance in the format - # of oracle://:@:/. - # i.e. "oracle://user:password@localhost:1521/orcl.localnet" - [connection_string: ] - - # The maximum amount of connections of the exporter allowed to be idle. - [max_idle_connections: ] - # The maximum amount of connections allowed to be open by the exporter. - [max_open_connections: ] - - # The number of seconds that will act as the query timeout when the exporter is querying against - # the OracleDB instance. - [query_timeout: | default = 5] -``` - -## Configuration example - -```yaml -integrations: - oracledb: - enabled: true - connection_string: oracle://user:password@localhost:1521/orcl.localnet - scrape_interval: 1m - scrape_timeout: 1m - scrape_integration: true -metrics: - wal_directory: /tmp/grafana-agent-wal -server: - log_level: debug -``` diff --git a/docs/sources/static/configuration/integrations/postgres-exporter-config.md b/docs/sources/static/configuration/integrations/postgres-exporter-config.md deleted file mode 100644 index 1bd2354c9e..0000000000 --- a/docs/sources/static/configuration/integrations/postgres-exporter-config.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/postgres-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/postgres-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/postgres-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/postgres-exporter-config/ -description: Learn about postgres_exporter_config -title: postgres_exporter_config ---- - -# postgres_exporter_config - -The `postgres_exporter_config` block configures the `postgres_exporter` -integration, which is an embedded version of -[`postgres_exporter`](https://github.com/prometheus-community/postgres_exporter). This -allows for the collection of metrics from Postgres servers. - -We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory -security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/prometheus-community/postgres_exporter#running-as-non-superuser). - -Full reference of options: - -```yaml - # Enables the postgres_exporter integration, allowing the Agent to automatically - # collect system metrics from the configured postgres server address - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. 
Overrides inferred values.
-  #
-  # The default value for this integration is inferred from a truncated version of
-  # the first DSN in data_source_names. The truncated DSN includes the hostname
-  # and database name (if used) of the server, but does not include any user
-  # information.
-  #
-  # If data_source_names contains more than one entry, the integration will fail to
-  # load and a value for instance must be manually provided.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the postgres_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/postgres_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-  [ - <relabel_config> ... ]
-
-  # How frequently to truncate the WAL for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # Data Source Names specifies the Postgres server(s) to connect to. This is
-  # REQUIRED but may also be specified by the POSTGRES_EXPORTER_DATA_SOURCE_NAME
-  # environment variable, where DSNs in the environment variable are separated
-  # by commas. If neither are set, the integration will fail to start.
-  #
-  # The format of this is specified here: https://pkg.go.dev/github.com/lib/pq#ParseURL
-  #
-  # A working example value for a server with a password is:
-  # "postgresql://username:password@localhost:5432/database?sslmode=disable"
-  #
-  # Multiple DSNs may be provided here, allowing for scraping from multiple
-  # servers.
-  data_source_names:
-  - <string>
-
-  # Disables collection of metrics from pg_settings.
-  [disable_settings_metrics: <boolean> | default = false]
-
-  # Autodiscover databases to collect metrics from. If false, only collects
-  # metrics from databases listed in data_source_names.
-  [autodiscover_databases: <boolean> | default = false]
-
-  # Excludes specific databases from being collected when autodiscover_databases
-  # is true.
-  exclude_databases:
-  [ - <string> ]
-
-  # Includes only specific databases (excluding all others) when autodiscover_databases
-  # is true.
-  include_databases:
-  [ - <string> ]
-
-  # Path to a YAML file containing custom queries to run. Check out
-  # postgres_exporter's queries.yaml for examples of the format:
-  # https://github.com/prometheus-community/postgres_exporter/blob/master/queries.yaml
-  [query_path: <string> | default = ""]
-
-  # When true, only exposes metrics supplied from query_path.
-  [disable_default_metrics: <boolean> | default = false]
-```
diff --git a/docs/sources/static/configuration/integrations/process-exporter-config.md b/docs/sources/static/configuration/integrations/process-exporter-config.md
deleted file mode 100644
index c6e888df77..0000000000
--- a/docs/sources/static/configuration/integrations/process-exporter-config.md
+++ /dev/null
@@ -1,186 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/process-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/process-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/process-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/process-exporter-config/
-description: Learn about process_exporter_config
-title: process_exporter_config
----
-
-# process_exporter_config
-
-The `process_exporter_config` block configures the `process_exporter` integration,
-which is an embedded version of
-[`process-exporter`](https://github.com/ncabatoff/process-exporter)
-and allows for collecting metrics based on the /proc filesystem on Linux
-systems. Note that on non-Linux systems, enabling this exporter is a no-op.
-
-Note that if running the Agent in a container, you will need to bind mount
-folders from the host system so the integration can monitor them:
-
-```
-docker run \
-  -v "/proc:/proc:ro" \
-  -v /tmp/agent:/etc/agent \
-  -v /path/to/config.yaml:/etc/agent-config/agent.yaml \
-  grafana/agent:{{< param "AGENT_RELEASE" >}} \
-  --config.file=/etc/agent-config/agent.yaml
-```
-
-Replace `/path/to/config.yaml` with the appropriate path on your host system
-where an Agent config file can be found.
-
-For running on Kubernetes, make sure to set the equivalent mounts and
-capabilities there as well:
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: agent
-spec:
-  containers:
-  - image: grafana/agent:{{< param "AGENT_RELEASE" >}}
-    name: agent
-    args:
-    - --config.file=/etc/agent-config/agent.yaml
-    volumeMounts:
-    - name: procfs
-      mountPath: /proc
-      readOnly: true
-  volumes:
-  - name: procfs
-    hostPath:
-      path: /proc
-```
-
-The manifest and Tanka configs provided by this repository do not have the
-mounts or capabilities required for running this integration.
-
-An example config for `process_exporter_config` that tracks all processes is the
-following:
-
-```
-enabled: true
-process_names:
-- name: "{{.Comm}}"
-  cmdline:
-  - '.+'
-```
-
-Full reference of options:
-
-```yaml
-  # Enables the process_exporter integration, allowing the Agent to automatically
-  # collect system metrics from the host UNIX system.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the agent hostname
-  # and HTTP listen port, delimited by a colon.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the process_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/process_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
- [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # procfs mountpoint. - [procfs_path: | default = "/proc"] - - # If a proc is tracked, track with it any children that aren't a part of their - # own group. - [track_children: | default = true] - - # Report on per-threadname metrics as well. - [track_threads: | default = true] - - # Gather metrics from smaps file, which contains proportional resident memory - # size. - [gather_smaps: | default = true] - - # Recheck process names on each scrape. - [recheck_on_scrape: | default = false] - - # A collection of matching rules to use for deciding which processes to - # monitor. Each config can match multiple processes to be tracked as a single - # process "group." - process_names: - [- ] -``` - -## process_matcher_config - -```yaml -# The name to use for identifying the process group name in the metric. By -# default, it uses the base path of the executable. -# -# The following template variables are available: -# -# - {{.Comm}}: Basename of the original executable from /proc//stat -# - {{.ExeBase}}: Basename of the executable from argv[0] -# - {{.ExeFull}}: Fully qualified path of the executable -# - {{.Username}}: Username of the effective user -# - {{.Matches}}: Map containing all regex capture groups resulting from -# matching a process with the cmdline rule group. -# - {{.PID}}: PID of the process. Note that the PID is copied from the -# first executable found. -# - {{.StartTime}}: The start time of the process. This is useful when combined -# with PID as PIDS get reused over time. -# - `{{.Cgroups}}`: The cgroups, if supported, of the process (`/proc/self/cgroup`). This is particularly useful for identifying to which container a process belongs. -# -# **NOTE**: Using `PID` or `StartTime` is discouraged, as it is almost never what you want, and is likely to result in high cardinality metrics. - - -[name: | default = "{{.ExeBase}}"] - -# A list of strings that match the base executable name for a process, truncated -# at 15 characters. It is derived from reading the second field of -# /proc//stat minus the parens. -# -# If any of the strings match, the process will be tracked. -comm: - [- ] - -# A list of strings that match argv[0] for a process. If there are no slashes, -# only the basename of argv[0] needs to match. Otherwise the name must be an -# exact match. For example, "postgres" may match any postgres binary but -# "/usr/local/bin/postgres" can only match a postgres at that path exactly. -# -# If any of the strings match, the process will be tracked. -exe: - [- ] - -# A list of regular expressions applied to the argv of the process. Each -# regex here must match the corresponding argv for the process to be tracked. -# The first element that is matched is argv[1]. -# -# Regex Captures are added to the .Matches map for use in the name. 
-cmdline: - [- ] -``` diff --git a/docs/sources/static/configuration/integrations/redis-exporter-config.md b/docs/sources/static/configuration/integrations/redis-exporter-config.md deleted file mode 100644 index 392fcb359c..0000000000 --- a/docs/sources/static/configuration/integrations/redis-exporter-config.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/redis-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/redis-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/redis-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/redis-exporter-config/ -description: Learn about redis_exporter_config -title: redis_exporter_config ---- - -# redis_exporter_config - -The `redis_exporter_config` block configures the `redis_exporter` integration, which is an embedded version of [`redis_exporter`](https://github.com/oliver006/redis_exporter). This allows for the collection of metrics from Redis servers. - -Note that currently, an Agent can only collect metrics from a single Redis server. If you want to collect metrics from multiple Redis servers, you can run multiple Agents and add labels using `relabel_configs` to differentiate between the Redis servers: - -```yaml -redis_exporter: - enabled: true - redis_addr: "redis-2:6379" - relabel_configs: - - source_labels: [__address__] - target_label: instance - replacement: redis-2 -``` - -We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory -security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/oliver006/redis_exporter#authenticating-with-redis). - -Full reference of options: -```yaml - # Enables the redis_exporter integration, allowing the Agent to automatically - # collect system metrics from the configured redis address - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the hostname - # portion of redis_addr. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the redis_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/redis_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # Monitor the exporter itself and include those metrics in the results. - [include_exporter_metrics: | default = false] - - # exporter-specific configuration options - - # Address of the redis instance. - redis_addr: - - # User name to use for authentication (Redis ACL for Redis 6.0 and newer). 
-  [redis_user: <string>]
-
-  # Password of the redis instance.
-  [redis_password: <string>]
-
-  # Path of a file containing a password. If this is defined, it takes precedence
-  # over redis_password.
-  [redis_password_file: <string>]
-
-  # Path of a file containing a JSON object which maps Redis URIs [string] to passwords [string]
-  # (e.g. {"redis://localhost:6379": "sample_password"}).
-  [redis_password_map_file: <string>]
-
-  # Namespace for the metrics.
-  [namespace: <string> | default = "redis"]
-
-  # What to use for the CONFIG command.
-  [config_command: <string> | default = "CONFIG"]
-
-  # Comma separated list of key-patterns to export value and length/size, searched for with SCAN.
-  [check_keys: <string>]
-
-  # Comma separated list of LUA regex for grouping keys. When unset, no key
-  # groups will be made.
-  [check_key_groups: <string>]
-
-  # Check key or key groups batch size hint for the underlying SCAN. Keeping
-  # the same name for backwards compatibility, but this applies to both key
-  # and key groups batch size configuration.
-  [check_key_groups_batch_size: <int> | default = 10000]
-
-  # The maximum number of distinct key groups with the most memory utilization
-  # to present as distinct metrics per database. The leftover key groups will be
-  # aggregated in the 'overflow' bucket.
-  [max_distinct_key_groups: <int> | default = 100]
-
-  # Comma separated list of single keys to export value and length/size.
-  [check_single_keys: <string>]
-
-  # Comma separated list of stream-patterns to export info about streams, groups and consumers, searched for with SCAN.
-  [check_streams: <string>]
-
-  # Comma separated list of single streams to export info about streams, groups and consumers.
-  [check_single_streams: <string>]
-
-  # Whether to export key values as labels when using `check_keys` or `check_single_keys`.
-  [export_key_values: <boolean> | default = true]
-
-  # Comma separated list of individual keys to export counts for.
-  [count_keys: <string>]
-
-  # Comma-separated list of paths to Lua Redis scripts for collecting extra metrics.
-  [script_path: <string>]
-
-  # Timeout for connection to Redis instance (in Golang duration format).
-  [connection_timeout: <duration> | default = "15s"]
-
-  # Name of the client key file (including full path) if the server requires TLS client authentication.
-  [tls_client_key_file: <string>]
-
-  # Name of the client certificate file (including full path) if the server requires TLS client authentication.
-  [tls_client_cert_file: <string>]
-
-  # Name of the CA certificate file (including full path) if the server requires TLS client authentication.
-  [tls_ca_cert_file: <string>]
-
-  # Whether to set client name to redis_exporter.
-  [set_client_name: <boolean>]
-
-  # Whether to scrape Tile38 specific metrics.
-  [is_tile38: <boolean>]
-
-  # Whether this is a redis cluster (Enable this if you need to fetch key level data on a Redis Cluster).
-  [is_cluster: <boolean> | default = false]
-
-  # Whether to scrape Client List specific metrics.
-  [export_client_list: <boolean>]
-
-  # Whether to include the client's port when exporting the client list. Note
-  # that including this will increase the cardinality of all redis metrics.
-  [export_client_port: <boolean>]
-
-  # Whether to also export go runtime metrics.
-  [redis_metrics_only: <boolean>]
-
-  # Whether to ping the redis instance after connecting.
-  [ping_on_connect: <boolean>]
-
-  # Whether to include system metrics like e.g. redis_total_system_memory_bytes.
-  [incl_system_metrics: <boolean>]
-
-  # Whether to skip TLS verification.
-  [skip_tls_verification: <boolean>]
-```
diff --git a/docs/sources/static/configuration/integrations/snmp-config.md b/docs/sources/static/configuration/integrations/snmp-config.md
deleted file mode 100644
index bd8cfcfe62..0000000000
--- a/docs/sources/static/configuration/integrations/snmp-config.md
+++ /dev/null
@@ -1,193 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/snmp-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/snmp-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/snmp-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/snmp-config/
-description: Learn about snmp config
-title: snmp config
----
-
-# snmp config
-
-The `snmp` block configures the `snmp` integration,
-which is an embedded version of
-[`snmp_exporter`](https://github.com/prometheus/snmp_exporter). This allows for
-easy collection of SNMP metrics from network devices.
-
-{{< admonition type="note" >}}
-`snmp config` uses the latest configuration introduced in version 0.23 of the Prometheus `snmp_exporter`.
-{{< /admonition >}}
-
-## Quick configuration example
-
-To get started, define SNMP targets in the Grafana Agent's integrations block:
-
-```yaml
-metrics:
-  wal_directory: /tmp/wal
-integrations:
-  snmp:
-    enabled: true
-    snmp_targets:
-      - name: network_switch_1
-        address: 192.168.1.2
-        module: if_mib
-        walk_params: public
-        auth: public
-      - name: network_router_2
-        address: 192.168.1.3
-        module: mikrotik
-        walk_params: private
-        auth: private
-    walk_params:
-      private:
-        retries: 2
-      public:
-        retries: 1
-```
-
-## Prometheus service discovery use case
-
-If you need to scrape SNMP devices in a more dynamic environment, and can't
-define devices in `snmp_targets` because the targets change over time, you can
-use a service discovery approach. For instance, with
-[DNS discovery](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#dns_sd_config):
-
-```yaml
-metrics:
-  wal_directory: /tmp/wal
-  configs:
-    - name: snmp_targets
-      scrape_configs:
-        - job_name: 'snmp'
-          dns_sd_configs:
-            - names:
-                - switches.srv.example.org
-                - routers.srv.example.org
-          params:
-            module: [if_mib]
-            walk_params: [private]
-            auth: [private]
-          metrics_path: /integrations/snmp/metrics
-          relabel_configs:
-            - source_labels: [__address__]
-              target_label: __param_target
-            - source_labels: [__param_target]
-              target_label: instance
-            - replacement: 127.0.0.1:12345 # address must match the grafana agent -server.http.address flag
-              target_label: __address__
-integrations:
-  snmp:
-    enabled: true
-    scrape_integration: false # set autoscrape to off
-    walk_params:
-      private:
-        retries: 2
-```
-
-Full reference of options:
-
-```yaml
-  # Enables the snmp integration, allowing the Agent to automatically
-  # collect metrics for the configured SNMP targets.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the snmp integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/snmp/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-  [ - <relabel_config> ... ]
-
-  # How frequently to truncate the WAL for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # SNMP configuration file with custom modules.
-  # See https://github.com/prometheus/snmp_exporter#generating-configuration for
-  # more details on how to generate a custom snmp.yml file.
-  [config_file: <string> | default = ""]
-
-  # Embedded SNMP configuration. You can specify your modules here instead of an external config file.
-  # See https://github.com/prometheus/snmp_exporter/tree/main#generating-configuration for
-  # more details on how to specify your SNMP modules.
-  # If neither this nor config_file is defined, the embedded snmp_exporter default set of modules is used.
-  snmp_config:
-  [- ... ]
-  [- ... ]
-
-  # List of SNMP targets to poll.
-  snmp_targets:
-  [- <snmp_target> ... ]
-
-  # Map of SNMP connection profiles that can be used to override default SNMP settings.
-  walk_params:
-  [ <string>: <walk_param> ... ]
-```
-
-## snmp_target config
-
-```yaml
-  # Name of a snmp_target.
-  [name: <string>]
-
-  # The address of the SNMP device.
-  [address: <string>]
-
-  # SNMP module to use for polling.
-  [module: <string> | default = ""]
-
-  # SNMP authentication profile to use.
-  [auth: <string> | default = ""]
-
-  # walk_param config to use for this snmp_target.
-  [walk_params: <string> | default = ""]
-```
-
-## walk_param config
-
-```yaml
-  # How many objects to request with GET/GETBULK, defaults to 25.
-  # May need to be reduced for buggy devices.
-  [max_repetitions: <int> | default = 25]
-
-  # How many times to retry a failed request, defaults to 3.
-  [retries: <int> | default = 3]
-
-  # Timeout for each SNMP request, defaults to 5s.
-  [timeout: <duration> | default = 5s]
-```
-
-## About SNMP modules
-
-An SNMP module is a set of SNMP counters that are scraped together from a
-specific type of network device.
-
-The available SNMP modules can be found in the embedded snmp.yml file
-[here](https://github.com/grafana/agent/blob/main/internal/static/integrations/snmp_exporter/common/snmp.yml).
-If not specified, the `if_mib` module is used.
-
-If you need to use custom SNMP modules, you can
-[generate](https://github.com/prometheus/snmp_exporter#generating-configuration) your own
-snmp.yml file and specify it using the `config_file` parameter.
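-
-As a sketch of that workflow (the file path and module name below are
-hypothetical), a custom snmp.yml can be wired in like this:
-
-```yaml
-integrations:
-  snmp:
-    enabled: true
-    # Path to the generated snmp.yml containing custom modules (hypothetical path).
-    config_file: /etc/agent/snmp.yml
-    snmp_targets:
-      - name: network_switch_1
-        address: 192.168.1.2
-        # Module name defined in the custom snmp.yml (hypothetical).
-        module: my_custom_module
-```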
diff --git a/docs/sources/static/configuration/integrations/snowflake-config.md b/docs/sources/static/configuration/integrations/snowflake-config.md
deleted file mode 100644
index c648445a2d..0000000000
--- a/docs/sources/static/configuration/integrations/snowflake-config.md
+++ /dev/null
@@ -1,88 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/snowflake-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/snowflake-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/snowflake-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/snowflake-config/
-description: Learn about snowflake_config
-title: snowflake_config
----
-
-# snowflake_config
-
-The `snowflake_config` block configures the `snowflake` integration,
-which is an embedded version of
-[`snowflake-prometheus-exporter`](https://github.com/grafana/snowflake-prometheus-exporter). This allows the collection of [Snowflake](https://www.snowflake.com/) metrics.
-
-Full reference of options:
-
-```yaml
-  # Enables the snowflake integration, allowing the Agent to automatically
-  # collect metrics for the specified snowflake account.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is the configured account_name.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the snowflake integration is run but not scraped and thus not
-  # remote-written. Metrics for the integration are exposed at
-  # /integrations/snowflake/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, lets you drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-  [ - <relabel_config> ... ]
-
-  # How frequently the WAL is truncated for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # The account name of the snowflake account to monitor.
-  account_name: <string>
-
-  # Username for the database user used to scrape metrics.
-  username: <string>
-
-  # Password for the database user used to scrape metrics.
-  password: <string>
-
-  # The warehouse to use when querying metrics.
-  warehouse: <string>
-
-  # The role to use when connecting to the database. The ACCOUNTADMIN role is used by default.
- [role: | default = "ACCOUNTADMIN"] - -``` -## Quick configuration example - -```yaml -integrations: - snowflake: - enabled: true - account_name: XXXXXXX-YYYYYYY - username: snowflake-user - password: snowflake-pass - warehouse: SNOWFLAKE_WAREHOUSE - role: ACCOUNTADMIN -``` diff --git a/docs/sources/static/configuration/integrations/squid-config.md b/docs/sources/static/configuration/integrations/squid-config.md deleted file mode 100644 index 6bfff685a3..0000000000 --- a/docs/sources/static/configuration/integrations/squid-config.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/squid-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/squid-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/squid-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/squid-config/ -description: Learn about squid_config -title: squid_config ---- - -# squid_config - -The `squid_config` block configures the `squid` integration, -which is an embedded version of a forked version of the [`Squid_exporter`](https://github.com/boynux/squid-exporter). This integration allows you to collect third-party [Squid](http://www.squid-cache.org/) metrics. - -Full reference of options: - -```yaml - # Enables the Squid integration, allowing the Agent to automatically - # collect metrics for the specified Squid instance. - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is the configured host:port of the connection string. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the Squid integration is run but not scraped and thus not - # remote-written. Metrics for the integration are exposed at - # /integrations/squid/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, lets you drop series - # that you don't care about from the integration. - metric_relabel_configs: - [ - ... ] - - # How frequently the WAL is truncated for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # The address used to connect to the Squid instance in the format - # of :. - # i.e. "localhost:3128" - [address: ] - - # The username for squid instance. - [username: ] - - # The password for username above. 
- [password: ] -``` - -## Configuration example - -```yaml -integrations: - squid: - enabled: true - address: localhost:3128 - scrape_interval: 1m - scrape_timeout: 1m - scrape_integration: true -metrics: - wal_directory: /tmp/grafana-agent-wal -server: - log_level: debug -``` diff --git a/docs/sources/static/configuration/integrations/statsd-exporter-config.md b/docs/sources/static/configuration/integrations/statsd-exporter-config.md deleted file mode 100644 index 87c3145895..0000000000 --- a/docs/sources/static/configuration/integrations/statsd-exporter-config.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -aliases: -- ../../../configuration/integrations/statsd-exporter-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/statsd-exporter-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/statsd-exporter-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/statsd-exporter-config/ -description: Learn about statsd_exporter_config -title: statsd_exporter_config ---- - -# statsd_exporter_config - -The `statsd_exporter_config` block configures the `statsd_exporter` -integration, which is an embedded version of -[`statsd_exporter`](https://github.com/prometheus/statsd_exporter). This allows -for the collection of statsd metrics and exposing them as Prometheus metrics. - -Full reference of options: - -```yaml - # Enables the statsd_exporter integration, allowing the Agent to automatically - # collect system metrics from the configured statsd server address - [enabled: | default = false] - - # Sets an explicit value for the instance label when the integration is - # self-scraped. Overrides inferred values. - # - # The default value for this integration is inferred from the agent hostname - # and HTTP listen port, delimited by a colon. - [instance: ] - - # Automatically collect metrics from this integration. If disabled, - # the statsd_exporter integration will be run but not scraped and thus not - # remote-written. Metrics for the integration will be exposed at - # /integrations/statsd_exporter/metrics and can be scraped by an external - # process. - [scrape_integration: | default = ] - - # How often should the metrics be collected? Defaults to - # prometheus.global.scrape_interval. - [scrape_interval: | default = ] - - # The timeout before considering the scrape a failure. Defaults to - # prometheus.global.scrape_timeout. - [scrape_timeout: | default = ] - - # Allows for relabeling labels on the target. - relabel_configs: - [- ... ] - - # Relabel metrics coming from the integration, allowing to drop series - # from the integration that you don't care about. - metric_relabel_configs: - [ - ... ] - - # How frequent to truncate the WAL for this integration. - [wal_truncate_frequency: | default = "60m"] - - # - # Exporter-specific configuration options - # - - # The UDP address on which to receive statsd metric lines. An empty string - # will disable UDP collection. - [listen_udp: | default = ":9125"] - - # The TCP address on which to receive statsd metric lines. An empty string - # will disable TCP collection. - [listen_tcp: | default = ":9125"] - - # The Unixgram socket path to receive statsd metric lines. An empty string - # will disable unixgram collection. - [listen_unixgram: | default = ""] - - # The permission mode of the unixgram socket, when enabled. 
-  [unix_socket_mode: <string> | default = "755"]
-
-  # An optional mapping config that can translate dot-separated StatsD metrics
-  # into labeled Prometheus metrics. For full instructions on how to write this
-  # object, see the official documentation from the statsd_exporter:
-  #
-  # https://github.com/prometheus/statsd_exporter#metric-mapping-and-configuration
-  #
-  # Note that a SIGHUP will not reload this config.
-  [mapping_config: ]
-
-  # Size (in bytes) of the operating system's transmit read buffer associated
-  # with the UDP or unixgram connection. Please make sure the kernel parameter
-  # net.core.rmem_max is set to a value greater than the value specified.
-  [read_buffer: <int> | default = 0]
-
-  # Maximum size of your metric mapping cache. Relies on least recently used
-  # replacement policy if max size is reached.
-  [cache_size: <int> | default = 1000]
-
-  # Metric mapping cache type. Valid values are "lru" and "random".
-  [cache_type: <string> | default = "lru"]
-
-  # Size of internal queue for processing events.
-  [event_queue_size: <int> | default = 10000]
-
-  # Number of events to hold in queue before flushing.
-  [event_flush_threshold: <int> | default = 1000]
-
-  # Time interval between event queue flushes.
-  [event_flush_interval: <duration> | default = "200ms"]
-
-  # Parse DogStatsd style tags.
-  [parse_dogstatsd_tags: <boolean> | default = true]
-
-  # Parse InfluxDB style tags.
-  [parse_influxdb_tags: <boolean> | default = true]
-
-  # Parse Librato style tags.
-  [parse_librato_tags: <boolean> | default = true]
-
-  # Parse SignalFX style tags.
-  [parse_signalfx_tags: <boolean> | default = true]
-
-  # Optional: Relay address configuration. This setting, if provided,
-  # specifies the destination to forward your metrics.
-  # Note that it must be a UDP endpoint in the format 'host:port'.
-  [relay_address: <string>]
-
-  # Maximum relay output packet length to avoid fragmentation.
-  [relay_packet_length: <int> | default = 1400]
-```
diff --git a/docs/sources/static/configuration/integrations/windows-exporter-config.md b/docs/sources/static/configuration/integrations/windows-exporter-config.md
deleted file mode 100644
index bcb753b086..0000000000
--- a/docs/sources/static/configuration/integrations/windows-exporter-config.md
+++ /dev/null
@@ -1,175 +0,0 @@
----
-aliases:
-- ../../../configuration/integrations/windows-exporter-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/integrations/windows-exporter-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/integrations/windows-exporter-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/integrations/windows-exporter-config/
-description: Learn about windows_exporter_config
-title: windows_exporter_config
----
-
-# windows_exporter_config
-
-The `windows_exporter_config` block configures the `windows_exporter`
-integration, which is an embedded version of
-[`windows_exporter`](https://github.com/grafana/windows_exporter). This allows
-for collecting Windows metrics and exposing them as Prometheus metrics.
-
-Full reference of options:
-
-```yaml
-  # Enables the windows_exporter integration, allowing the Agent to automatically
-  # collect system metrics from the local Windows instance.
-  [enabled: <boolean> | default = false]
-
-  # Sets an explicit value for the instance label when the integration is
-  # self-scraped. Overrides inferred values.
-  #
-  # The default value for this integration is inferred from the agent hostname
-  # and HTTP listen port, delimited by a colon.
-  [instance: <string>]
-
-  # Automatically collect metrics from this integration. If disabled,
-  # the windows_exporter integration will be run but not scraped and thus not
-  # remote-written. Metrics for the integration will be exposed at
-  # /integrations/windows_exporter/metrics and can be scraped by an external
-  # process.
-  [scrape_integration: <boolean> | default = <integrations_config.scrape_integrations>]
-
-  # How often should the metrics be collected? Defaults to
-  # prometheus.global.scrape_interval.
-  [scrape_interval: <duration> | default = <global_config.scrape_interval>]
-
-  # The timeout before considering the scrape a failure. Defaults to
-  # prometheus.global.scrape_timeout.
-  [scrape_timeout: <duration> | default = <global_config.scrape_timeout>]
-
-  # Allows for relabeling labels on the target.
-  relabel_configs:
-  [- <relabel_config> ... ]
-
-  # Relabel metrics coming from the integration, allowing you to drop series
-  # from the integration that you don't care about.
-  metric_relabel_configs:
-  [ - <relabel_config> ... ]
-
-  # How frequently to truncate the WAL for this integration.
-  [wal_truncate_frequency: <duration> | default = "60m"]
-
-  #
-  # Exporter-specific configuration options
-  #
-
-  # List of collectors to enable. Any non-experimental collector from the
-  # embedded version of windows_exporter can be enabled here.
-  [enabled_collectors: <string> | default = "cpu,cs,logical_disk,net,os,service,system"]
-
-  # Settings for collectors which accept configuration. Settings specified here
-  # are only used if the corresponding collector is enabled in
-  # enabled_collectors.
-
-  # Configuration for Exchange Mail Server
-  exchange:
-    # Comma-separated list of collectors to use. Defaults to all if not specified.
-    # Maps to collectors.exchange.enabled in windows_exporter
-    [enabled_list: <string>]
-
-  # Configuration for the IIS web server
-  iis:
-    # Regexp of sites to whitelist. Site name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.iis.site-whitelist in windows_exporter
-    [site_whitelist: <string> | default = ".+"]
-
-    # Regexp of sites to blacklist. Site name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.iis.site-blacklist in windows_exporter
-    [site_blacklist: <string> | default = ""]
-
-    # Regexp of apps to whitelist. App name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.iis.app-whitelist in windows_exporter
-    [app_whitelist: <string> | default = ".+"]
-
-    # Regexp of apps to blacklist. App name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.iis.app-blacklist in windows_exporter
-    [app_blacklist: <string> | default = ""]
-
-  # Configuration for reading metrics from text files in a directory
-  text_file:
-    # Directory to read text files with metrics from.
-    # Maps to collector.textfile.directory in windows_exporter
-    [text_file_directory: <string> | default = "C:\Program Files\windows_exporter\textfile_inputs"]
-
-  # Configuration for SMTP metrics
-  smtp:
-    # Regexp of virtual servers to whitelist. Server name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.smtp.server-whitelist in windows_exporter
-    [whitelist: <string> | default = ".+"]
-
-    # Regexp of virtual servers to blacklist. Server name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.smtp.server-blacklist in windows_exporter
-    [blacklist: <string> | default = ""]
-
-  # Configuration for Windows Services
-  service:
-    # WQL 'where' clause to use in the WMI metrics query. Limits the response to the services you specify and reduces the size of the response.
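-    # For example, where_clause: "Name='wuauserv'" limits collection to the
-    # Windows Update service. (Hypothetical value; any valid WQL condition works.)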
-    # Maps to collector.service.services-where in windows_exporter
-    [where_clause: <string> | default = ""]
-
-  # Configuration for physical disk on Windows
-  physical_disk:
-    # Regexp of volumes to include. Disk name must both match include and not match exclude to be included.
-    # Maps to collector.physical_disk.disk-include in windows_exporter.
-    [include: <string> | default = ".+"]
-
-    # Regexp of volumes to exclude. Disk name must both match include and not match exclude to be included.
-    # Maps to collector.physical_disk.disk-exclude in windows_exporter.
-    [exclude: <string> | default = ""]
-
-  # Configuration for Windows Processes
-  process:
-    # Regexp of processes to include. Process name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.process.whitelist in windows_exporter
-    [whitelist: <string> | default = ".+"]
-
-    # Regexp of processes to exclude. Process name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.process.blacklist in windows_exporter
-    [blacklist: <string> | default = ""]
-
-  # Configuration for NICs
-  network:
-    # Regexp of NICs to whitelist. NIC name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.net.nic-whitelist in windows_exporter
-    [whitelist: <string> | default = ".+"]
-
-    # Regexp of NICs to blacklist. NIC name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.net.nic-blacklist in windows_exporter
-    [blacklist: <string> | default = ""]
-
-  # Configuration for Microsoft SQL Server
-  mssql:
-    # Comma-separated list of mssql WMI classes to use.
-    # Maps to collectors.mssql.classes-enabled in windows_exporter
-    [enabled_classes: <string> | default = "accessmethods,availreplica,bufman,databases,dbreplica,genstats,locks,memmgr,sqlstats,sqlerrors,transactions"]
-
-  # Configuration for Microsoft Message Queuing
-  msmq:
-    # WQL 'where' clause to use in the WMI metrics query. Limits the response to the msmqs you specify and reduces the size of the response.
-    # Maps to collector.msmq.msmq-where in windows_exporter
-    [where_clause: <string> | default = ""]
-
-  # Configuration for disk information
-  logical_disk:
-    # Regexp of volumes to whitelist. Volume name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.logical_disk.volume-whitelist in windows_exporter
-    [whitelist: <string> | default = ".+"]
-
-    # Regexp of volumes to blacklist. Volume name must both match whitelist and not match blacklist to be included.
-    # Maps to collector.logical_disk.volume-blacklist in windows_exporter
-    [blacklist: <string> | default = ""]
-
-  # Configuration for Windows Task Scheduler
-  scheduled_task:
-    # Regexp of tasks to include.
-    [include: <string> | default = ".+"]
-    # Regexp of tasks to exclude.
-    [exclude: <string> | default = ""]
-```
diff --git a/docs/sources/static/configuration/logs-config.md b/docs/sources/static/configuration/logs-config.md
deleted file mode 100644
index 56d8d06773..0000000000
--- a/docs/sources/static/configuration/logs-config.md
+++ /dev/null
@@ -1,136 +0,0 @@
----
-aliases:
-- ../../configuration/logs-config/
-- ../../configuration/loki-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/logs-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/logs-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/logs-config/
-description: Learn about logs_config
-title: logs_config
-weight: 300
----
-
-# logs_config
-
-The `logs_config` block configures how the Agent collects logs and sends them to
-a Loki push API endpoint.
`logs_config` is identical to how Promtail is -configured, except deprecated fields have been removed and the server_config is -not supported. - -Refer to the -[Promtail documentation](/docs/loki/latest/clients/promtail/configuration/#clients) -for the supported values for these fields. - -```yaml -# Directory to store Loki Promtail positions files in. Positions files are -# required to read logs, and are used to store the last read offset of log -# sources. The positions files will be stored in -# /.yml. -# -# Optional only if every config has a positions.filename manually provided. -# -# This directory will be automatically created if it doesn't exist. -[positions_directory: ] - -# Configure values for all Loki Promtail instances. -[global: ] - -# Loki Promtail instances to run for log collection. -configs: - - [] -``` - -## global_config - -The `global_config` block configures global values for all launched Loki Promtail -instances. - -```yaml -clients: - - [] -# Configure how frequently log files from disk get polled for changes. -[file_watch_config: ] - -``` - -> **Note:** More information on the following types can be found on the -> documentation for Promtail: -> -> * [`promtail.client_config`](/docs/loki/latest/clients/promtail/configuration/#clients) - - -## file_watch_config - -The `file_watch_config` block configures how often to poll log files from disk -for changes: - -```yaml -# Minimum frequency to poll for files. Any time file changes are detected, the -# poll frequency gets reset to this duration. - [min_poll_frequency: | default = "250ms"] - # Maximum frequency to poll for files. Any time no file changes are detected, - # the poll frequency doubles in value up to the maximum duration specified by - # this value. - # - # The default is set to the same as min_poll_frequency. - [max_poll_frequency: | default = "250ms"] -``` - -## logs_instance_config - -The `logs_instance_config` block is an individual instance of Promtail with its -own set of scrape rules and where to forward logs. It is identical to how -Promtail is configured, except deprecated fields have been removed and the -`server_config` block is not supported. - -```yaml -# Name of this config. Required, and must be unique across all Loki configs. -# The name of the config will be the value of a logs_config label for all -# Loki Promtail metrics. -name: - -clients: - - [] - -# Optional configuration for where to store the positions files. If -# positions.filename is left empty, the file will be stored in -# /.yml. -# -# The directory of the positions file will automatically be created on start up -# if it doesn't already exist.. -[positions: ] - -scrape_configs: - - [] - -[target_config: ] - -[limits_config: ] -``` -> **Note:** More information on the following types can be found on the -> documentation for Promtail: -> -> * [`promtail.client_config`](/docs/loki/latest/clients/promtail/configuration/#clients) -> * [`promtail.scrape_config`](/docs/loki/latest/clients/promtail/configuration/#scrape_configs) -> * [`promtail.target_config`](/docs/loki/latest/clients/promtail/configuration/#target_config) -> * [`promtail.limits_config`](/docs/loki/latest/clients/promtail/configuration/#limits_config) - -> **Note:** Backticks in values are not supported. - -> **Note:** Because of how YAML treats backslashes in double-quoted strings, -> all backslashes in a regex expression must be escaped when using double -> quotes. 
But because of double processing, in the Grafana Agent configuration file
-> you must use a quadruple-backslash (`\\\\`) construction to add backslashes
-> into regular expressions. Here is an example for the `name=(\w+)\s` regex:
-```
- selector: '{app="my-app"} |~ "name=(\\\\w+)\\\\s"'
-```
-
-Using a single- or double-backslash construction produces the error:
-```
-failed to make file target manager: invalid match stage config: invalid selector syntax for match stage: parse error at line 1, col 40: literal not terminated
-```
-Using backticks produces the error:
-```
-invalid match stage config: invalid selector syntax for match stage: parse error at line 1, col 51: syntax error: unexpected IDENTIFIER, expecting STRING
-```
diff --git a/docs/sources/static/configuration/metrics-config.md b/docs/sources/static/configuration/metrics-config.md
deleted file mode 100644
index 296d3700b0..0000000000
--- a/docs/sources/static/configuration/metrics-config.md
+++ /dev/null
@@ -1,352 +0,0 @@
----
-aliases:
-- ../../configuration/metrics-config/
-- ../../configuration/prometheus-config/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/metrics-config/
-- /docs/grafana-cloud/send-data/agent/static/configuration/metrics-config/
-canonical: https://grafana.com/docs/agent/latest/static/configuration/metrics-config/
-description: Learn about metrics_config
-title: metrics_config
-weight: 200
----
-
-# metrics_config
-
-The `metrics_config` block is used to define a collection of metrics
-instances. Each instance defines a collection of Prometheus-compatible
-scrape_configs and remote_write rules. Most users will only need to
-define one instance.
-
-```yaml
-# Configures the optional scraping service to cluster agents.
-[scraping_service: <scraping_service_config>]
-
-# Configures the gRPC client used for agents to connect to other
-# clustered agents.
-[scraping_service_client: <scraping_service_client_config>]
-
-# Configure values for all Prometheus instances.
-[global: <global_config>]
-
-# Configure the directory used by instances to store their WAL.
-#
-# The Grafana Agent assumes that all folders within wal_directory are managed by
-# the agent itself. This means if you are using a PVC, you must point
-# wal_directory to a subdirectory of the PVC mount.
-[wal_directory: <string> | default = "data-agent/"]
-
-# Configures how long ago an abandoned (not associated with an instance) WAL
-# may be written to before being eligible to be deleted.
-[wal_cleanup_age: <duration> | default = "12h"]
-
-# Configures how often checks for abandoned WALs to be deleted are performed.
-# A value of 0 disables periodic cleanup of abandoned WALs.
-[wal_cleanup_period: <duration> | default = "30m"]
-
-# Allows you to disable HTTP Keep-Alives when scraping; the Agent will then use
-# each outgoing connection for only a single request.
-[http_disable_keepalives: <boolean> | default = false]
-
-# Configures the maximum amount of time a Keep-Alive connection can remain
-# idle before closing itself. Zero means no limit.
-# The setting is ignored when `http_disable_keepalives` is enabled.
-[http_idle_conn_timeout: <duration> | default = "5m"]
-
-# The list of Prometheus instances to launch with the agent.
-configs:
-  [- <metrics_instance_config>]
-
-# If an instance crashes abnormally, how long to wait before trying
-# to restart it. 0s disables the backoff period and restarts the agent
-# immediately.
-[instance_restart_backoff: <duration> | default = "5s"]
-
-# How to spawn instances based on instance configs. Supported values: shared,
-# distinct.
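-#
-# In "shared" mode (the default), compatible instance configurations are
-# combined into one underlying instance to reduce overhead; "distinct" runs
-# every instance configuration completely separately.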
-[instance_mode: <string> | default = "shared"]
-```
-
-## scraping_service_config
-
-The `scraping_service` block configures the [scraping service][scrape], an operational
-mode where configurations are stored centrally in a KV store and a cluster of
-agents distributes discovery and scrape load between nodes.
-
-```yaml
-# Whether to enable scraping service mode. When enabled, local configs
-# cannot be used.
-[enabled: <boolean> | default = false]
-
-# Note: due to backwards compatibility, the naming of the next three options
-# is less than ideal.
-
-# How often the agent should manually refresh the configuration. Useful if KV
-# change events are not received.
-[reshard_interval: <duration> | default = "1m"]
-
-# The timeout for configuration refreshes. These can occur on cluster events or
-# on the reshard interval. A timeout of 0 indicates no timeout.
-[reshard_timeout: <duration> | default = "30s"]
-
-# The timeout for cluster reshard events. A timeout of 0 indicates no timeout.
-[cluster_reshard_event_timeout: <duration> | default = "30s"]
-
-# Configuration for the KV store to store configurations.
-kvstore: <kvstore_config>
-
-# When set, allows configs pushed to the KV store to specify configuration
-# fields that can read secrets from files.
-#
-# This is disabled by default. When enabled, a malicious user can craft an
-# instance config that reads arbitrary files on the machine the Agent runs
-# on and sends its contents to a specifically crafted remote_write endpoint.
-#
-# If enabled, ensure that no untrusted users have access to the Agent API.
-[dangerous_allow_reading_files: <boolean>]
-
-# Configuration for how agents will cluster together.
-lifecycler: <lifecycler_config>
-```
-
-## kvstore_config
-
-The `kvstore_config` block configures the KV store used as storage for
-configurations in the scraping service mode.
-
-```yaml
-# Which underlying KV store to use. Can be either consul or etcd.
-[store: <string> | default = ""]
-
-# Key prefix to store all configurations with. Must end in /.
-[prefix: <string> | default = "configurations/"]
-
-# Configuration for a Consul client. Only applies if store
-# is "consul".
-consul:
-  # The hostname and port of Consul.
-  [host: <string> | default = "localhost:8500"]
-
-  # The ACL Token used to interact with Consul.
-  [acltoken: <string>]
-
-  # The HTTP timeout when communicating with Consul.
-  [httpclienttimeout: <duration> | default = 20s]
-
-  # Whether or not consistent reads to Consul are enabled.
-  [consistentreads: <boolean> | default = true]
-
-# Configuration for an ETCD v3 client. Only applies if
-# store is "etcd".
-etcd:
-  # The ETCD endpoints to connect to.
-  endpoints:
-    - <string>
-
-  # The dial timeout for the ETCD connection.
-  [dial_timeout: <duration> | default = 10s]
-
-  # The maximum number of retries to do for failed ops to ETCD.
-  [max_retries: <int> | default = 10]
-```
-
-## lifecycler_config
-
-The `lifecycler_config` block configures the lifecycler; the component that
-Agents use to cluster together.
-
-```yaml
-# Configures the distributed hash ring storage.
-ring:
-  # KV store for getting and sending distributed hash ring updates.
-  kvstore: <kvstore_config>
-
-  # Specifies when other agents in the cluster should be considered
-  # unhealthy if they haven't sent a heartbeat within this duration.
-  [heartbeat_timeout: <duration> | default = "1m"]
-
-# Number of tokens to generate for the distributed hash ring.
-[num_tokens: <int> | default = 128]
-
-# How often agents should send a heartbeat to the distributed hash
-# ring.
-[heartbeat_period: <duration> | default = "5s"]
-
-# How long to wait for tokens from other agents after generating
-# a new set to resolve collisions. Useful only when using a gossip
-# KV store.
-[observe_period: <duration> | default = "0s"]
-
-# Period to wait before joining the ring. 0s means to join immediately.
-[join_after: <duration> | default = "0s"]
-
-# Minimum duration to wait before marking the agent as ready to receive
-# traffic. Used to work around race conditions for multiple agents exiting
-# the distributed hash ring at the same time.
-[min_ready_duration: <duration> | default = "1m"]
-
-# Network interfaces to resolve addresses defined by other agents
-# registered in the distributed hash ring.
-[interface_names: <list of string> | default = ["eth0", "en0"]]
-
-# Duration to sleep before exiting. Ensures that metrics get scraped
-# before the process quits.
-[final_sleep: <duration> | default = "30s"]
-
-# File path to store tokens. If empty, tokens will not be stored during
-# shutdown and will not be restored at startup.
-[tokens_file_path: <string> | default = ""]
-
-# Availability zone of the host the agent is running on. Default is an
-# empty string, which disables zone awareness for writes.
-[availability_zone: <string> | default = ""]
-```
-
-## scraping_service_client_config
-
-The `scraping_service_client_config` block configures how clustered Agents will
-generate gRPC clients to connect to each other.
-
-```yaml
-grpc_client_config:
-  # Maximum size in bytes the gRPC client will accept from the connected server.
-  [max_recv_msg_size: <int> | default = 104857600]
-
-  # Maximum size in bytes the gRPC client will send to the connected server.
-  [max_send_msg_size: <int> | default = 16777216]
-
-  # Whether messages should be gzipped.
-  [use_gzip_compression: <boolean> | default = false]
-
-  # The rate limit for gRPC clients; 0 means no rate limit.
-  [rate_limit: <float> | default = 0]
-
-  # gRPC burst allowed for rate limits.
-  [rate_limit_burst: <int> | default = 0]
-
-  # Controls whether the client should retry the request
-  # when a rate limit is hit.
-  [backoff_on_ratelimits: <boolean> | default = false]
-
-  # Configures the retry backoff when backoff_on_ratelimits is
-  # true.
-  backoff_config:
-    # The minimum delay when backing off.
-    [min_period: <duration> | default = "100ms"]
-
-    # The maximum delay when backing off.
-    [max_period: <duration> | default = "10s"]
-
-    # The number of times to backoff and retry before failing.
-    [max_retries: <int> | default = 10]
-```
-
-## global_config
-
-The `global_config` block configures global values for all launched Prometheus
-instances.
-
-```yaml
-# How frequently Prometheus instances should scrape.
-[scrape_interval: <duration> | default = "1m"]
-
-# How long to wait before timing out a scrape from a target.
-[scrape_timeout: <duration> | default = "10s"]
-
-# A list of static labels to add for all metrics.
-external_labels:
-  { <string>: <string> }
-
-# Default set of remote_write endpoints. If an instance doesn't define any
-# remote_writes, it will use this list.
-remote_write:
-  - [<remote_write>]
-```
-
-> **Note:** For more information on remote_write, refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#remote_write).
->
-> The following default values set by Grafana Agent Static Mode are different from the defaults set by Prometheus:
-> - `remote_write`: `send_exemplars` default value is `true`
-> - `remote_write`: `queue_config`: `retry_on_http_429` default value is `true`
-
-## metrics_instance_config
-
-The `metrics_instance_config` block configures an individual metrics
-instance, which acts as its own mini Prometheus-compatible agent, though
-without support for the TSDB.
-
-```yaml
-# Name of the instance. Must be present. Will be added as a label to agent
-# metrics.
-name: string - -# Whether this agent instance should only scrape from targets running on the -# same machine as the agent process. -[host_filter: | default = false] - -# Relabel configs to apply against discovered targets. The relabeling is -# temporary and just used for filtering targets. -host_filter_relabel_configs: - [ - ... ] - -# How frequently the WAL truncation process should run. Every iteration of -# the truncation will checkpoint old series and remove old samples. If data -# has not been sent within this window, some of it may be lost. -# -# The size of the WAL will increase with less frequent truncations. Making -# truncations more frequent reduces the size of the WAL but increases the -# chances of data loss when remote_write is failing for longer than the -# specified frequency. -[wal_truncate_frequency: | default = "60m"] - -# The minimum amount of time that series and samples should exist in the WAL -# before being considered for deletion. The consumed disk space of the WAL will -# increase by making this value larger. -# -# Setting this value to 0s is valid, but may delete series before all -# remote_write shards have been able to write all data, and may cause errors on -# slower machines. -[min_wal_time: | default = "5m"] - -# The maximum amount of time that series and samples may exist within the WAL -# before being considered for deletion. Series that have not received writes -# since this period will be removed, and all samples older than this period will -# be removed. -# -# This value is useful in long-running network outages, preventing the WAL from -# growing forever. -# -# Must be larger than min_wal_time. -[max_wal_time: | default = "4h"] - -# Deadline for flushing data when a Prometheus instance shuts down -# before giving up and letting the shutdown proceed. -[remote_flush_deadline: | default = "1m"] - -# When true, writes staleness markers to all active series to -# remote_write. -[write_stale_on_shutdown: | default = false] - -# A list of scrape configuration rules. -scrape_configs: - - [] - -# A list of remote_write targets. 
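-# If this list is empty, the instance falls back to the default set of
-# remote_write endpoints configured in the global block.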
-remote_write: - - [] -``` - -> **Note:** More information on the following types can be found on the Prometheus -> website: -> -> * [`relabel_config`](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#relabel_config) -> * [`scrape_config`](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config) -> * [`remote_write`](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#remote_write) - -## Data retention - -{{< docs/shared source="agent" lookup="/wal-data-retention.md" version="" >}} - -{{% docs/reference %}} -[scrape]: "/docs/agent/ -> /docs/agent//static/configuration/scraping-service" -[scrape]: "/docs/grafana-cloud/ -> ./scraping-service" -{{% /docs/reference %}} diff --git a/docs/sources/static/configuration/scraping-service.md b/docs/sources/static/configuration/scraping-service.md deleted file mode 100644 index ccfb2c67c6..0000000000 --- a/docs/sources/static/configuration/scraping-service.md +++ /dev/null @@ -1,193 +0,0 @@ ---- -aliases: -- ../../configuration/scraping-service/ -- ../../scraping-service/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/scraping-service/ -- /docs/grafana-cloud/send-data/agent/static/configuration/scraping-service/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/scraping-service/ -description: Learn about the scraping service -menuTitle: Scraping service -title: Scraping service (Beta) -weight: 600 ---- - -# Scraping service (Beta) - -The Grafana Agent scraping service allows you to cluster a set of Agent processes and distribute the scrape load. - -Determining what to scrape is done by writing instance configuration files to an -[API][api], which then stores the configuration files in a KV store backend. -All agents in the cluster **must** use the same KV store to see the same set -of configuration files. - -Each process of the Grafana Agent can be running multiple independent -"instances" at once, where an "instance" refers to the combination of: - -- Service discovery for all `scrape_configs` within that loaded configuration -- Scrapes metrics from all discovered targets -- Stores data in its own Write-Ahead Log specific to the loaded configuration -- Remote Writes scraped metrics to the configured `remote_write` destinations - specified within the loaded configuration. - -The "instance configuration file," then, is the configuration file that -specifies the set of `scrape_configs` and `remote_write` endpoints. For example, -a small instance configuration file looks like this: - -```yaml -scrape_configs: - - job_name: self-scrape - static_configs: - - targets: ['localhost:9090'] - labels: - process: 'agent' -remote_write: - - url: http://cortex:9009/api/prom/push -``` - -The full set of supported options for an instance configuration file is -available in the -[`metrics-config.md` file][metrics]. - -Multiple instance configuration files are necessary for sharding. Each -config file is distributed to a particular agent on the cluster based on the -hash of its contents. - -When the scraping service is enabled, Agents **disallow** specifying -instance configurations locally in the configuration file; using the KV store -is required. [`agentctl`](#agentctl) can be used to manually sync -instance configuration files to the Agent's API server. - -## Distributed hash ring - -The scraping service uses a Distributed Hash Ring (commonly called "the -ring") to cluster agents and to shard configurations within that ring. 
Each -Agent joins the ring with a random distinct set of _tokens_ that are used for -sharding. The default number of generated tokens is 128. - -The Distributed Hash Ring is also stored in a KV store. Since a KV store is -also needed for storing configuration files, it is encouraged to re-use -the same KV store for the ring. - -When sharding, the Agent currently uses the name of a configuration file -stored in the KV store for load distribution. Configuration names are guaranteed to be -unique keys. The hash of the name is used as the _lookup key_ in the ring and -determines which agent (based on token) should be responsible for that configuration. -"Price is Right" rules are used for the Agent lookup; the Agent owning the token -with the closest value to the key without going over is responsible for the -configuration. - -All Agents are simultaneously watching the KV store for changes to the set of -configuration files. When a configuration file is added or updated in the configuration -store, each Agent will run the configuration name hash through their copy of the Hash -Ring to determine if they are responsible for that config. - -When an Agent receives a new configuration that it is responsible for, it launches a -new instance from the instance configuration. If a configuration is deleted from the KV store, -this will be detected by the owning Agent, and it will stop the metric collection -process for that configuration file. - -When an Agent receives an event for an updated configuration file that they used to -be the owner of but are no longer the owner, the associated instance for that -configuration file is stopped for that Agent. This can happen when the cluster -size changes. - -The scraping service currently does not support replication. Only one agent -at a time will be responsible for scraping a certain configuration. - -### Resharding - -When a new Agent joins or leaves the cluster, the set of tokens in the ring may -cause configurations to hash to a new Agent. The process of responding to this -action is called "resharding." - -Resharding is run: - -1. When an Agent joins the ring -2. When an Agent leaves the ring -3. When the KV store sends a notification indicating a configuration has changed. -4. On a specified interval if KV change events have not fired. - -The resharding process involves each Agent retrieving the full set of -configurations stored in the KV store and determining if: - -1. The configuration owned by the current resharding Agent has changed and needs to - be reloaded. -2. The configuration is no longer owned by the current resharding Agent and the - associated instance should be stopped. -3. The configuration has been deleted, and the associated instance should be stopped. - -## Best practices - -Because distribution is determined by the number of configuration files and not how -many targets exist per configuration file, the best amount of distribution is achieved -when each configuration file has the lowest amount of targets possible. The best -distribution will be achieved if each configuration file stored in the KV store is -limited to one static configuration with only one target. 
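-
-For example, a sharded deployment might store many small instance configuration
-files shaped like the following sketch (target and endpoint values are
-hypothetical):
-
-```yaml
-scrape_configs:
-  - job_name: node-exporter-host-01
-    static_configs:
-      - targets: ['host-01.example.com:9100']
-remote_write:
-  - url: http://cortex:9009/api/prom/push
-```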
- -## Example - -Here's an example `agent.yaml` configuration file that uses the same `etcd` server for -both configuration storage and the distributed hash ring storage: - -```yaml -server: - log_level: debug - -metrics: - global: - scrape_interval: 1m - scraping_service: - enabled: true - kvstore: - store: etcd - etcd: - endpoints: - - etcd:2379 - lifecycler: - ring: - replication_factor: 1 - kvstore: - store: etcd - etcd: - endpoints: - - etcd:2379 -``` - -Note that there are no instance configurations present in this example; instance -configurations must be passed to the API for the Agent to start scraping metrics. - -## agentctl - -`agentctl` is a tool included with this repository that helps users interact -with the new Config Management API. The `agentctl config-sync` subcommand uses -local YAML files as a source of truth and syncs their contents with the API. -Entries in the API not in the synced directory will be deleted. - -`agentctl` is distributed in binary form with each release and as a Docker -container with the `grafana/agentctl` image. Tanka configurations that -utilize `grafana/agentctl` and sync a set of configurations to the API -are planned for the future. - -## Debug Ring endpoint - -You can use the `/debug/ring` endpoint to troubleshoot issues with the scraping service in Scraping Service Mode. -It provides information about the Distributed Hash Ring and the current distribution of configurations among Agents in the cluster. -It also allows you to forget an instance in the ring manually. - -You can access this endpoint by making an HTTP request to the Agent's API server. - -Information returned by the `/debug/ring` endpoint includes: - -- The list of Agents in the cluster, and their respective tokens used for sharding. -- The list of configuration files in the KV store and associated hash values used for lookup in the ring. -- The unique instance ID assigned to each instance of the Agent running in the cluster. - The instance ID is a unique identifier assigned to each running instance of the Agent within the cluster. - The exact details of the instance ID generation might be specific to the implementation of the Grafana Agent. -- The time of the "Last Heartbeat" of each instance. The Last Heartbeat is the last time the instance was active in the ring. - -{{% docs/reference %}} -[api]: "/docs/agent/ -> /docs/agent//static/api" -[api]: "/docs/grafana-cloud/ -> ../api" -[metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config" -[metrics]: "/docs/grafana-cloud/ -> ./metrics-config" -{{% /docs/reference %}} diff --git a/docs/sources/static/configuration/server-config.md b/docs/sources/static/configuration/server-config.md deleted file mode 100644 index aaba5fee0c..0000000000 --- a/docs/sources/static/configuration/server-config.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -aliases: -- ../../configuration/server-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/server-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/server-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/server-config/ -description: Learn about server_config -title: server_config -weight: 100 ---- - -# server_config - -The `server_config` block configures the Agent's behavior as an HTTP server, -gRPC server, and the log level for the whole process. - -The Agent exposes an HTTP server for scraping its own metrics and gRPC for the -scraping service mode. 
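-
-For example, a minimal sketch that only raises the log verbosity, leaving every
-other option at its default:
-
-```yaml
-server:
-  log_level: debug
-```
-
-Full reference of options: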
- -```yaml -# Log only messages with the given severity or above. Supported values [debug, -# info, warn, error]. This level affects logging for all Agent-level logs, not -# just the HTTP and gRPC server. -# -# Note that some integrations use their own loggers which ignore this -# setting. -[log_level: | default = "info"] - -# Log messages with the given format. Supported values [logfmt, json]. -# This affects logging for all Agent-levle logs, not just the HTTP and gRPC -# server. -# -# Note that some integrations use their own loggers which ignore this -# setting. -[log_format: | default = "logfmt"] - -# TLS configuration for the HTTP server. Required when the -# -server.http.tls-enabled flag is provided, ignored otherwise. -[http_tls_config: ] - -# TLS configuration for the gRPC server. Required when the -# -server.grpc.tls-enabled flag is provided, ignored otherwise. -[grpc_tls_config: ] -``` - -## server_tls_config - -The `server_tls_config` configures TLS. - -```yaml -# File path to the server certificate -[cert_file: ] - -# File path to the server key -[key_file: ] - -# Tells the server what is acceptable from the client, this drives the options in client_tls_config -[client_auth_type: ] - -# File path to the signing CA certificate, needed if CA is not trusted -[client_ca_file: ] - -# Windows certificate filter allows selecting client CA and server certificate from the Windows Certificate store -[windows_certificate_filter: ] -``` - -## windows_certificate_filter_config - -The `windows_certificate_filter_config` configures the use of the Windows Certificate store. Setting cert_file, key_file, and client_ca_file are invalid settings when using the windows_certificate_filter. - -```yaml -# Client configuration, optional. If nothing specific will use the default client ca root -[client: ] - -# Name of the store to look for the Client Certificate ex My, CA -server: -``` - - -### windows_client_config - -```yaml -# Array of issuer common names to check against -issuer_common_names: - [- ... ] - -# Regular expression to match Subject name -[subject_regex: ] - -# Client Template ID to match in ASN1 format ex "1.2.3" -[template_id: ] -``` - -### windows_server_config - -```yaml -# Name of the system store to look for the Server Certificate ex LocalMachine, CurrentUser -system_store: - -# Name of the store to look for the Server Certificate ex My, CA -store: - -# Array of issuer common names to check against -issuer_common_names: -[- ... ] - - -# Server Template ID to match in ASN1 format ex "1.2.3" -[template_id: ] - -# How often to refresh the server certificate ex 5m, 1h -[refresh_interval: ] -``` diff --git a/docs/sources/static/configuration/traces-config.md b/docs/sources/static/configuration/traces-config.md deleted file mode 100644 index 4ff3bfc85e..0000000000 --- a/docs/sources/static/configuration/traces-config.md +++ /dev/null @@ -1,482 +0,0 @@ ---- -aliases: -- ../../configuration/tempo-config/ -- ../../configuration/traces-config/ -- /docs/grafana-cloud/monitor-infrastructure/agent/static/configuration/traces-config/ -- /docs/grafana-cloud/send-data/agent/static/configuration/traces-config/ -canonical: https://grafana.com/docs/agent/latest/static/configuration/traces-config/ -description: Learn about traces_config -title: traces_config -weight: 400 ---- - -# traces_config - -The `traces_config` block configures a set of Tempo instances, each of which -configures its own tracing pipeline. 
Having multiple configs allows you to -configure multiple distinct pipelines, each of which collects spans and sends -them to a different location. - -{{< admonition type="note" >}} -If you are using multiple configs, you must manually set port numbers for -each receiver, otherwise they will all try to use the same port and fail to -start. -{{< /admonition >}} - -```yaml -configs: - [ - ... ] - ``` - -## traces_instance_config - -```yaml -# Name configures the name of this Tempo instance. Names must be non-empty and -# unique across all Tempo instances. The value of the name here will appear in -# logs and as a label on metrics. -name: - -# This field allows for the general manipulation of tags on spans that pass -# through this agent. A common use may be to add an environment or cluster -# variable. -[ attributes: ] - -# This field allows to configure grouping spans into batches. Batching helps -# better compress the data and reduce the number of outgoing connections -# required transmit the data. -[ batch: ] - -remote_write: - # host:port to send traces to. - # Here must be the port of gRPC receiver, not the Tempo default port. - # Example for cloud instances: `tempo-us-central1.grafana.net:443` - # For local / on-premises instances: `localhost:55680` or `tempo.example.com:14250` - # Note: for non-encrypted connections you must also set `insecure: true` - - endpoint: - - # Custom HTTP headers to be sent along with each remote write request. - # Be aware that 'authorization' header will be overwritten in presence - # of basic_auth. - headers: - [ : ... ] - - # Controls whether compression is enabled. - [ compression: | default = "gzip" | supported = "none", "gzip"] - - # Controls what protocol to use when exporting traces. - # Only "grpc" is supported in Grafana Cloud. - [ protocol: | default = "grpc" | supported = "grpc", "http" ] - - # Controls what format to use when exporting traces, in combination with protocol. - # protocol/format supported combinations are grpc/otlp and http/otlp. - # Only grpc/otlp is supported in Grafana Cloud. - [ format: | default = "otlp" | supported = "otlp" ] - - # Controls whether or not TLS is required. See https://godoc.org/google.golang.org/grpc#WithInsecure - [ insecure: | default = false ] - - # Deprecated in favor of tls_config - # If both `insecure_skip_verify` and `tls_config.insecure_skip_verify` are used, - # the latter take precedence. - [ insecure_skip_verify: | default = false ] - - # Configures opentelemetry exporters to use the OpenTelemetry auth extension `oauth2clientauthextension`. - # Can not be used in combination with `basic_auth`. - # See https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/extension/oauth2clientauthextension/README.md - oauth2: - # Configures the TLS settings specific to the oauth2 client - # The client identifier issued to the oauth client - [ client_id: ] - # The secret string associated with the oauth client - [ client_secret: ] - # Additional parameters for requests to the token endpoint - [ endpoint_params: ] - # The resource server's token endpoint URL - [ token_url: ] - # Optional, requested permissions associated with the oauth client - [ scopes: [] ] - # Optional, specifies the timeout fetching tokens from the token_url. Default: no timeout - [ timeout: ] - # TLS client configuration for the underneath client to authorization server. 
- # https://github.com/open-telemetry/opentelemetry-collector/blob/{{< param "OTEL_VERSION" >}}/config/configtls/README.md - tls: - # Disable validation of the server certificate. - [ insecure: | default = false ] - # InsecureSkipVerify will enable TLS but not verify the certificate. - [ insecure_skip_verify: | default = false ] - # ServerName requested by client for virtual hosting. - # This sets the ServerName in the TLSConfig. Please refer to - # https://godoc.org/crypto/tls#Config for more information. - [ server_name_override: ] - # Path to the CA cert. For a client this verifies the server certificate. If empty uses system root CA. - [ ca_file: ] - # In memory PEM encoded cert. - [ ca_pem: ] - # Path to the TLS cert to use for TLS required connections - [ cert_file: ] - # In memory PEM encoded TLS cert to use for TLS required connections. - [ cert_pem: ] - # Path to the TLS key to use for TLS required connections - [ key_file: ] - # In memory PEM encoded TLS key to use for TLS required connections. - [ key_pem: ] - # Minimum acceptable TLS version. - [ min_version: | default = "1.2" ] - # Maximum acceptable TLS version. - # If not set, it is handled by crypto/tls - currently it is "1.3". - [ max_version: | default = "" ] - # ReloadInterval specifies the duration after which the certificate will be reloaded. - # If not set, it will never be reloaded. - [ reload_interval: ] - - # Controls TLS settings of the exporter's client: - # https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#tls_config - # This should be used only if `insecure` is set to false - tls_config: - # Path to the CA cert. For a client this verifies the server certificate. If empty uses system root CA. - [ ca_file: ] - # Path to the TLS cert to use for TLS required connections - [ cert_file: ] - # Path to the TLS key to use for TLS required connections - [ key_file: ] - # Disable validation of the server certificate. - [ insecure_skip_verify: | default = false ] - - # Sets the `Authorization` header on every trace push with the - # configured username and password. - # password and password_file are mutually exclusive. - basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - - [ sending_queue: ] - [ retry_on_failure: ] - -# This processor writes a well formatted log line to a logs instance for each span, root, or process -# that passes through the Agent. This allows for automatically building a mechanism for trace -# discovery and building metrics from traces using Loki. It should be considered experimental. -automatic_logging: - # Indicates where the stream of log lines should go. Either supports writing - # to a logs instance defined in this same config or to stdout. - [ backend: | default = "stdout" | supported = "stdout", "logs_instance" ] - # Indicates the logs instance to write logs to. - # Required if backend is set to logs_instance. - [ logs_instance_name: ] - # Log one line per span. Warning! possibly very high volume - [ spans: ] - # Log one line for every root span of a trace. - [ roots: ] - # Log one line for every process - [ processes: ] - # Additional span attributes to log - [ span_attributes: ] - # Additional process attributes to log - [ process_attributes: ] - # Timeout on writing logs to Loki when backend is "logs_instance." 
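-  # Consider raising this if log writes to Loki time out under load.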
- [ timeout: | default = 1ms ] - # Configures a set of key values that will be logged as labels - # They need to be span or process attributes logged in the log line - # - # This feature only applies when `backend = logs_instance` - # - # Loki only accepts alphanumeric and "_" as valid characters for labels. - # Labels are sanitized by replacing invalid characters with underscores. - [ labels: ] - overrides: - [ logs_instance_tag: | default = "traces" ] - [ service_key: | default = "svc" ] - [ span_name_key: | default = "span" ] - [ status_key: | default = "status" ] - [ duration_key: | default = "dur" ] - [ trace_id_key: | default = "tid" ] - -# Receiver configurations are mapped directly into the OpenTelemetry receivers -# block. At least one receiver is required. -# The Agent uses OpenTelemetry {{< param "OTEL_VERSION" >}}. Refer to the corresponding receiver's config. -# -# Supported receivers: otlp, jaeger, kafka, opencensus and zipkin. -receivers: - -# A list of prometheus scrape configs. Targets discovered through these scrape -# configs have their __address__ matched against the ip on incoming spans. If a -# match is found then relabeling rules are applied. -scrape_configs: - [ - ... ] -# Defines what method is used when adding k/v to spans. -# Options are `update`, `insert` and `upsert`. -# `update` only modifies an existing k/v and `insert` only appends if the k/v -# is not present. `upsert` does both. -[ prom_sd_operation_type: | default = "upsert" ] -# Configures what methods to use to do association between spans and pods. -# PromSD processor matches the IP address of the metadata labels from the k8s API -# with the IP address obtained from the specified pod association method. -# If a match is found then the span is labeled. -# -# Options are `ip`, `net.host.ip`, `k8s.pod.ip`, `hostname` and `connection`. -# - `ip`, `net.host.ip` and `k8s.pod.ip`, `hostname` match spans tags. -# - `connection` inspects the context from the incoming requests (gRPC and HTTP). -# -# Tracing instrumentation is commonly the responsible for tagging spans -# with IP address to the labels mentioned above. -# If running on kubernetes, `k8s.pod.ip` can be automatically attached via the -# downward API. For example, if you're using OTel instrumentation libraries, set -# OTEL_RESOURCE_ATTRIBUTES=k8s.pod.ip=$(POD_IP) to inject spans with the sender -# pod's IP. -# -# By default, all methods are enabled, and evaluated in the order specified above. -# Order of evaluation is honored when multiple methods are enabled. -prom_sd_pod_associations: - [ - ... ] - -# spanmetrics supports aggregating Request, Error and Duration (R.E.D) metrics -# from span data. -# -# spanmetrics generates two metrics from spans and uses remote_write or -# OpenTelemetry Prometheus exporters to serve the metrics locally. -# -# In order to use the remote_write exporter, you have to configure a Prometheus -# instance in the Agent and pass its name to the `metrics_instance` field. -# -# If you want to use the OpenTelemetry Prometheus exporter, you have to -# configure handler_endpoint and then scrape that endpoint. -# -# The first generated metric is `calls`, a counter to compute requests. -# The second generated metric is `latency`, a histogram to compute the -# operation's duration. -# -# If you want to rename the generated metrics, you can configure the `namespace` -# option of prometheus exporter. -# -# This is an experimental feature of Opentelemetry-Collector and the behavior -# may change in the future. 
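-# A minimal sketch, assuming a metrics instance named "default" is defined in
-# the metrics section of this same configuration:
-#
-# spanmetrics:
-#   metrics_instance: default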
-spanmetrics: - # latency_histogram_buckets and dimensions are the same as the configs in - # spanmetricsprocessor. - [ latency_histogram_buckets: ] - [ dimensions: ] - # const_labels are labels that will always get applied to the exported - # metrics. - const_labels: - [ : ... ] - # Metrics are namespaced to `traces_spanmetrics` by default. - # They can be further namespaced, i.e. `{namespace}_traces_spanmetrics` - [ namespace: ] - # metrics_instance is the metrics instance used to remote write metrics. - [ metrics_instance: ] - # handler_endpoint defines the endpoint where the OTel prometheus exporter will be exposed. - [ handler_endpoint: ] - # dimensions_cache_size defines the size of cache for storing Dimensions. - [ dimensions_cache_size: | default = 1000 ] - # aggregation_temporality configures whether to reset the metrics after flushing. - # It can be either AGGREGATION_TEMPORALITY_CUMULATIVE or AGGREGATION_TEMPORALITY_DELTA. - [ aggregation_temporality: | default = "AGGREGATION_TEMPORALITY_CUMULATIVE" ] - # metrics_flush_interval configures how often to flush generated metrics. - [ metrics_flush_interval: | default = 15s ] - -# tail_sampling supports tail-based sampling of traces in the agent. -# -# Policies can be defined that determine what traces are sampled and sent to the -# backends and what traces are dropped. -# -# In order to make a correct sampling decision it's important that the agent has -# a complete trace. This is achieved by waiting a given time for all the spans -# before evaluating the trace. -# -# Tail sampling also supports multi agent deployments, allowing to group all -# spans of a trace in the same agent by load balancing the spans by trace ID -# between the instances. -# * To make use of this feature, check load_balancing below * -tail_sampling: - # policies define the rules by which traces will be sampled. Multiple policies - # can be added to the same pipeline. - policies: - [ - ... ] - - # Time that to wait before making a decision for a trace. - # Longer wait times reduce the probability of sampling an incomplete trace at - # the cost of higher memory usage. - [ decision_wait: | default = 5s ] - - # Optional, number of traces kept in memory - [ num_traces: | default = 50000 ] - - # Optional, expected number of new traces (helps in allocating data structures) - [ expected_new_traces_per_sec: | default = 0 ] - -# load_balancing configures load balancing of spans across multi agent deployments. -# It ensures that all spans of a trace are sampled in the same instance. -# It works by exporting spans based on their traceID via consistent hashing. -# -# Enabling this feature is required for "tail_sampling", "spanmetrics", and "service_graphs" -# to correctly work when spans are ingested by multiple agent instances. -# -# Load balancing works by layering two pipelines and consistently exporting -# spans belonging to a trace to the same agent instance. -# Agent instances need to be able to communicate with each other via gRPC. -# -# When load_balancing is enabled: -# 1. When an Agent receives spans from the configured "receivers". -# 2. If the "attributes" processor is configured, it will run through all the spans. -# 3. The spans will be exported using the "load_balancing" configuration to any of the Agent instances. -# This may or may not be the same Agent which has already received the span. -# 4. The Agent which received the span from the loadbalancer will run these processors, -# in this order, if they are configured: -# 1. "spanmetrics" -# 2. 
"service_graphs" -# 3. "tail_sampling" -# 4. "automatic_logging" -# 5. "batch" -# 5. The spans are then remote written using the "remote_write" configuration. -# -# Load balancing significantly increases CPU usage. This is because spans are -# exported an additional time between agents. -load_balancing: - # resolver configures the resolution strategy for the involved backends - # It can be either "static", "dns" or "kubernetes". - resolver: - static: - # A fixed list of hostnames. - hostnames: - [ - ... ] - dns: - # DNS hostname from which to resolve IP addresses. - hostname: - # Port number to use with the resolved IP address when exporting spans. - [ port: | default = 4317 ] - # Resolver interval - [ interval: | default = 5s ] - # Resolver timeout - [ timeout: | default = 1s ] - # The kubernetes resolver receives IP addresses of a Kubernetes service - # from the Kubernetes API. It does not require polling. The Kubernetes API - # notifies the Agent when a new pod is available and when an old pod has exited. - # - # For the kubernetes resolver to work, Agent must be running under - # a system account with "list", "watch" and "get" permissions. - kubernetes: - service: - [ ports: | default = 4317 ] - - # routing_key can be either "traceID" or "service": - # * "service": exports spans based on their service name. - # * "traceID": exports spans based on their traceID. - [ routing_key: | default = "traceID" ] - - # receiver_port is the port the instance will use to receive load balanced traces - receiver_port: [ | default = 4318 ] - - # Load balancing is done via an otlp exporter. - # The remaining configuration is common with the remote_write block. - exporter: - # Controls whether compression is enabled. - [ compression: | default = "gzip" | supported = "none", "gzip"] - - # Controls whether or not TLS is required. - [ insecure: | default = false ] - - # Disable validation of the server certificate. Only used when insecure is set - # to false. - [ insecure_skip_verify: | default = false ] - - # Sets the `Authorization` header on every trace push with the - # configured username and password. - # password and password_file are mutually exclusive. - basic_auth: - [ username: ] - [ password: ] - [ password_file: ] - -# service_graphs configures processing of traces for building service graphs in -# the form of prometheus metrics. The generated metrics represent edges between -# nodes in the graph. Nodes are represented by `client` and `server` labels. -# -# e.g. tempo_service_graph_request_total{client="app", server="db"} 20 -# -# Service graphs works by inspecting spans and looking for the tag `span.kind`. -# If it finds the span kind to be client or server, it stores the request in a -# local in-memory store. -# -# That request waits until its corresponding client or server pair span is -# processed or until the maximum waiting time has passed. -# When either of those conditions is reached, the request is processed and -# removed from the local store. If the request is complete by that time, it'll -# be recorded as an edge in the graph. -# -# Service graphs supports multi-agent deployments, allowing to group all spans -# of a trace in the same agent by load balancing the spans by trace ID between -# the instances. -# * To make use of this feature, check load_balancing above * -service_graphs: - [ enabled: | default = false ] - - # configures the time the processor will wait since a span is consumed until - # it's considered expired if its paired has not been processed. 
- # - # increasing the waiting time will increase the percentage of paired spans. - # retaining unpaired spans for longer will make reaching max_items more likely. - [ wait: | default = 10s ] - - # configures the max amount of edges that will be stored in memory. - # - # spans that arrive to the processor that do not pair with an already - # processed span are dropped. - # - # a higher max number of items increases the max throughput of processed spans - # with a higher memory consumption. - [ max_items: | default = 10_000 ] - - # configures the number of workers that will process completed edges concurrently. - # as edges are completed, they get queued to be collected as metrics for the graph. - [ workers: | default = 10 ] - - # configures what status codes are considered as successful (e.g. HTTP 404). - # - # by default, a request is considered failed in the following cases: - # 1. HTTP status is not 2XX - # 1. gRPC status code is not OK - # 1. span status is Error - success_codes: - # http status codes not to be considered as failure - http: - [ - ... ] - # grpc status codes not to be considered as failure - grpc: - [ - ... ] - -# jaeger_remote_sampling configures one or more jaeger remote sampling extensions. -# For more details about the configuration please consult the OpenTelemetry documentation: -# https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/extension/jaegerremotesampling -# -# Example config: -# -# jaeger_remote_sampling: -# - source: -# remote: -# endpoint: jaeger-collector:14250 -# tls: -# insecure: true -# - source: -# reload_interval: 1s -# file: /etc/otelcol/sampling_strategies.json -# -jaeger_remote_sampling: - [ - ... ] -``` - -More information on the following types can be found on the documentation for their respective projects: - -* [`attributes.config`: OpenTelemetry-Collector](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/processor/attributesprocessor) -* [`batch.config`: OpenTelemetry-Collector](https://github.com/open-telemetry/opentelemetry-collector/tree/{{< param "OTEL_VERSION" >}}/processor/batchprocessor) -* [`otlpexporter.sending_queue`: OpenTelemetry-Collector](https://github.com/open-telemetry/opentelemetry-collector/tree/{{< param "OTEL_VERSION" >}}/exporter/otlpexporter) -* [`otlpexporter.retry_on_failure`: OpenTelemetry-Collector](https://github.com/open-telemetry/opentelemetry-collector/tree/{{< param "OTEL_VERSION" >}}/exporter/otlpexporter) -* `receivers`: - * [`jaegerreceiver`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/receiver/jaegerreceiver) - * [`kafkareceiver`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/receiver/kafkareceiver) - * [`otlpreceiver`: OpenTelemetry-Collector](https://github.com/open-telemetry/opentelemetry-collector/tree/{{< param "OTEL_VERSION" >}}/receiver/otlpreceiver) - * [`opencensusreceiver`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/receiver/opencensusreceiver) - * [`zipkinreceiver`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/receiver/zipkinreceiver) -* [`scrape_config`: 
Prometheus](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#scrape_config)
-* [`spanmetricsprocessor.latency_histogram_buckets`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/processor/spanmetricsprocessor/config.go#L37-L39)
-* [`spanmetricsprocessor.dimensions`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/processor/spanmetricsprocessor/config.go#L41-L48)
-* [`tailsamplingprocessor.policies`: OpenTelemetry-Collector-Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/processor/tailsamplingprocessor)
diff --git a/docs/sources/static/operation-guide/_index.md b/docs/sources/static/operation-guide/_index.md
deleted file mode 100644
index f50c335574..0000000000
--- a/docs/sources/static/operation-guide/_index.md
+++ /dev/null
@@ -1,205 +0,0 @@
----
-aliases:
-- ../operation-guide/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/operation-guide/
-- /docs/grafana-cloud/send-data/agent/static/operation-guide/
-canonical: https://grafana.com/docs/agent/latest/static/operation-guide/
-description: Learn how to operate Grafana Agent
-title: Operation guide
-weight: 700
----
-
-# Operation guide
-
-This guide helps you operate Grafana Agent.
-
-## Horizontal Scaling
-
-There are three options to horizontally scale your deployment of Grafana Agents:
-
-- [Host filtering](#host-filtering-beta) requires you to run one Agent on every
-  machine you wish to collect metrics from. Agents will only collect metrics
-  from the machines they run on.
-- [Hashmod sharding](#hashmod-sharding-stable) allows you to roughly shard the
-  discovered set of targets by using hashmod/keep relabel rules.
-- The [scraping service][scrape] allows you to cluster Grafana
-  Agents and have them distribute per-tenant configs throughout the cluster.
-
-Each has its own set of tradeoffs:
-
-- Host Filtering (Beta)
-  - Pros
-    - Does not need specialized configs per agent
-    - No external dependencies required to operate
-  - Cons
-    - Can cause significant load on service discovery APIs
-    - Requires each Agent to have the same list of scrape configs/remote_writes
-- Hashmod sharding (Stable)
-  - Pros
-    - Exact control over the number of shards to run
-    - Smaller load on SD compared to host filtering (as there are fewer
-      Agents)
-    - No external dependencies required to operate
-  - Cons
-    - Each Agent must have a specialized config with their shard number inserted
-      into the hashmod/keep relabel rule pair.
-    - Requires each Agent to have the same list of scrape configs/remote_writes,
-      with the exception of the hashmod rule being different.
-    - Hashmod is not [consistent hashing](https://en.wikipedia.org/wiki/Consistent_hashing),
-      so up to 100% of jobs will move to a new machine when scaling shards.
-- Scraping service (Beta)
-  - Pros
-    - Agents don't have to have a synchronized set of scrape configs / remote_writes
-      (they pull from a centralized location).
-    - Exact control over the number of shards to run.
-    - Uses [consistent hashing](https://en.wikipedia.org/wiki/Consistent_hashing),
-      so only 1/N jobs will move to a new machine when scaling shards.
-    - Smallest load on SD compared to host filtering, as only one Agent is
-      responsible for a config.
-  - Cons
-    - Centralized configs must discover a [minimal set of targets][targets]
-      to distribute evenly.
-    - Requires running a separate KV store to store the centralized configs.
-    - Managing centralized configs adds operational burden over managing a config
-      file.
-
-## Host filtering (Beta)
-
-Host filtering implements a form of "dumb sharding," where operators may deploy
-one Grafana Agent instance per machine in a cluster, all using the same
-configuration, and the Grafana Agents will only scrape targets that are
-running on the same node as the Agent.
-
-Running with `host_filter: true` means that if you have a target whose host
-machine is not also running a Grafana Agent process, _that target will not
-be scraped!_
-
-Host filtering is usually paired with a dedicated Agent process that is used for
-scraping targets that are running outside of a given cluster. For example, when
-running the Grafana Agent on GKE, you would have a DaemonSet with
-`host_filter` for scraping in-cluster targets, and a single dedicated Deployment
-for scraping other targets that are not running on a cluster node, such as the
-Kubernetes control plane API.
-
-If you want to scale your scrape load without host filtering, you can use the
-[scraping service][scrape] instead.
-
-The hostname of the Agent is determined by reading `$HOSTNAME`. If `$HOSTNAME`
-isn't defined, the Agent will use Go's [os.Hostname](https://golang.org/pkg/os/#Hostname)
-to determine the hostname.
-
-The following meta-labels are used to determine if a target is running on the
-same machine as the Agent:
-
-- `__address__`
-- `__meta_consul_node`
-- `__meta_dockerswarm_node_id`
-- `__meta_dockerswarm_node_hostname`
-- `__meta_dockerswarm_node_address`
-- `__meta_kubernetes_pod_node_name`
-- `__meta_kubernetes_node_name`
-- `__host__`
-
-The final label, `__host__`, isn't a label added by any Prometheus service
-discovery mechanism. Rather, `__host__` can be generated by using
-`host_filter_relabel_configs`. This allows for custom relabeling
-rules to determine the hostname where the predefined ones fail. Relabeling rules
-added with `host_filter_relabel_configs` are temporary and only used for the
-host filtering mechanism. Full relabeling rules should be applied in the
-appropriate `scrape_config` instead.
-
-Note that scrape_config `relabel_configs` do not apply to the host filtering
-logic; only `host_filter_relabel_configs` will work.
-
-If the determined hostname matches any of the meta labels, the discovered target
-is allowed. Otherwise, the target is ignored, and will not show up in the
-[targets API][api].
-
-## Hashmod sharding (Stable)
-
-Grafana Agents can be sharded by using a pair of hashmod/keep relabel rules.
-These rules will hash the address of a target and take its modulus against the
-number of Agent shards that are running.
-
-```yaml
-scrape_configs:
-- job_name: some_job
-  # Add usual service discovery here, such as static_configs
-  relabel_configs:
-  - source_labels: [__address__]
-    modulus: 4 # 4 shards
-    target_label: __tmp_hash
-    action: hashmod
-  - source_labels: [__tmp_hash]
-    regex: ^1$ # This is the 2nd shard
-    action: keep
-```
-
-Add the `relabel_configs` to all of your scrape_config blocks. Ensure that each
-running Agent shard has a different value for the `regex`; the first Agent shard
-should have `^0$`, the second should have `^1$`, and so on, up to `^3$`.
-
-This sharding mechanism means each Agent will scrape roughly 1/N of the total
-targets, where N is the number of shards. This allows for horizontally scaling
-the number of Agents and distributing load between them.
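-
-To avoid maintaining N nearly identical config files, you can keep a single
-file and inject the shard number from the environment. The following is a
-sketch, assuming each shard starts the Agent with the `-config.expand-env`
-flag so environment variables are expanded in the config, and a `SHARD`
-variable set to `0`, `1`, `2`, or `3`:
-
-```yaml
-scrape_configs:
-- job_name: some_job
-  # Add usual service discovery here, such as static_configs
-  relabel_configs:
-  - source_labels: [__address__]
-    modulus: 4 # 4 shards
-    target_label: __tmp_hash
-    action: hashmod
-  - source_labels: [__tmp_hash]
-    regex: ^${SHARD}$ # Expanded from the SHARD environment variable
-    action: keep
-```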
-
-Note that the hashmod used here is not a consistent hashing algorithm; this
-means that changing the number of shards may cause any number of targets to move
-to a new shard, up to 100%. When moving to a new shard, any existing data in the
-WAL from the old machine is effectively discarded.
-
-## Prometheus instances
-
-The Grafana Agent defines a concept of a Prometheus _Instance_, which is
-its own mini Prometheus-lite server. The instance runs a combination of
-Prometheus service discovery, scraping, a WAL for storage, and `remote_write`.
-
-Instances allow for fine-grained control of what data gets scraped and where it
-gets sent. Users can easily define two Instances that scrape different subsets
-of metrics and send them to two completely different remote_write systems.
-
-Instances are especially relevant to the [scraping service mode][scrape],
-where breaking up your scrape configs into multiple Instances is required for
-sharding and balancing scrape load across a cluster of Agents.
-
-## Instance sharing (Stable)
-
-The v0.5.0 release of the Agent introduced the concept of _instance sharing_,
-which combines scrape_configs from compatible instance configs into a single,
-shared Instance. Instance configs are compatible when they have no differences
-in configuration with the exception of what they scrape. `remote_write` configs
-may also differ in the order in which endpoints are declared, but the unsorted
-`remote_writes` must still be an exact match.
-
-In the shared instances mode, the `name` field of `remote_write` configs is
-ignored. The resulting `remote_write` configs will have a name identical to the
-first six characters of the group name and the first six characters of the hash
-from that `remote_write` config, separated by a `-`.
-
-The shared instances mode is the new default, and the previous behavior is
-deprecated. If you wish to restore the old behavior, set `instance_mode: distinct`
-in the [`metrics_config`][metrics] block of your config file.
-
-Shared instances are completely transparent to the user with the exception of
-exposed metrics. With `instance_mode: shared`, metrics for Prometheus components
-(WAL, service discovery, remote_write, etc.) have an `instance_group_name` label,
-which is the hash of all settings used to determine the shared instance. When
-`instance_mode: distinct` is set, the metrics for Prometheus components will
-instead have an `instance_name` label, which matches the name set on the
-individual Instance config. It is recommended to use the default of
-`instance_mode: shared` unless you really need granular metrics and don't mind
-the performance hit.
-
-Users can use the [targets API][api] to see all scraped targets, and the name
-of the shared instance they were assigned to.
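-
-As a sketch of the fine-grained control Instances provide, the following config
-defines two Instances that scrape different targets and write to different
-endpoints (all hostnames here are placeholders). Because their `remote_write`
-sections differ, these two instance configs are not compatible and would not be
-combined by instance sharing:
-
-```yaml
-metrics:
-  configs:
-  - name: apps
-    scrape_configs:
-    - job_name: app
-      static_configs:
-      - targets: ['app-server:8080']
-    remote_write:
-    - url: http://mimir-apps:9009/api/prom/push
-  - name: infra
-    scrape_configs:
-    - job_name: node
-      static_configs:
-      - targets: ['node-exporter:9100']
-    remote_write:
-    - url: http://mimir-infra:9009/api/prom/push
-```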
-
-{{% docs/reference %}}
-[scrape]: "/docs/agent/ -> /docs/agent//static/configuration/scraping-service"
-[scrape]: "/docs/grafana-cloud/ -> ../configuration/scraping-service"
-[targets]: "/docs/agent/ -> /docs/agent//static/configuration/scraping-service#best-practices"
-[targets]: "/docs/grafana-cloud/ -> ../configuration/scraping-service#best-practices"
-[api]: "/docs/agent/ -> /docs/agent//static/api#agent-api"
-[api]: "/docs/grafana-cloud/ -> ../api#agent-api"
-[metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config"
-[metrics]: "/docs/grafana-cloud/ -> ../configuration/metrics-config"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/release-notes.md b/docs/sources/static/release-notes.md
deleted file mode 100644
index 90afd41dfc..0000000000
--- a/docs/sources/static/release-notes.md
+++ /dev/null
@@ -1,1140 +0,0 @@
----
-aliases:
-- ../upgrade-guide/
-- ./upgrade-guide/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/release-notes/
-- /docs/grafana-cloud/send-data/agent/static/release-notes/
-canonical: https://grafana.com/docs/agent/latest/static/release-notes/
-description: Release notes for Grafana Agent static mode
-menuTitle: Release notes
-title: Release notes
-weight: 999
----
-
-# Release notes
-
-The release notes provide information about deprecations and breaking changes in Grafana Agent static mode.
-
-For a complete list of changes to Grafana Agent, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md).
-
-> **Note:** These release notes are specific to Grafana Agent static mode.
-> Other release notes for the different Grafana Agent variants are contained on separate pages:
->
-> * [Static mode Kubernetes operator release notes][release-notes-operator]
-> * [Flow mode release notes][release-notes-flow]
-
-{{% docs/reference %}}
-[release-notes-operator]: "/docs/agent/ -> /docs/agent//operator/release-notes"
-[release-notes-operator]: "/docs/grafana-cloud/ -> ../operator/release-notes"
-
-[release-notes-flow]: "/docs/agent/ -> /docs/agent//flow/release-notes"
-[release-notes-flow]: "/docs/grafana-cloud/ -> /docs/agent//flow/release-notes"
-
-[Modules]: "/docs/agent/ -> /docs/agent//flow/concepts/modules"
-[Modules]: "/docs/grafana-cloud/ -> /docs/agent//flow/concepts/modules"
-{{% /docs/reference %}}
-
-## v0.38
-
-### Breaking change: support for exporting Jaeger traces removed
-
-The deprecated support for exporting Jaeger-formatted traces has been removed.
-To send traces to Jaeger, export OTLP-formatted data to a version of Jaeger
-that supports OTLP.
-
-## v0.37
-
-### Breaking change: The default value of `retry_on_http_429` is overridden to `true` for the `queue_config` in `remote_write` in `metrics` config.
-
-{{< admonition type="note" >}}
-The default set by Grafana Agent Static Mode is different from the default set by Prometheus.
-{{< /admonition >}}
-
-Grafana Agent now sets the default value of `retry_on_http_429` to `true` for the `queue_config` in `remote_write`, whereas Prometheus defaults it to `false`.
-This changed default setting allows the agent to retry sending data when it receives an HTTP 429 error and helps avoid losing data in metric pipelines.
-
-* If you explicitly set `retry_on_http_429`, no action is required.
-* If you do not explicitly set `retry_on_http_429` and you do *not* want to retry on HTTP 429, make sure you set it to `false` when you upgrade to this new version.
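-
-For example, to keep the previous Prometheus behavior, set the field explicitly
-in each `remote_write` entry. A minimal sketch (the URL is a placeholder):
-
-```yaml
-metrics:
-  configs:
-  - name: default
-    remote_write:
-    - url: https://prometheus.example.com/api/prom/push
-      queue_config:
-        retry_on_http_429: false
-```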
- -### Breaking change: Renamed `non_indexed_labels` Loki processing stage to `structured_metadata`. - -If you use the Loki processing stage in your Agent configuration, you must rename the `non_indexed_labels` pipeline stage definition to `structured_metadata`. - - -Old configuration example: - -```yaml - pipeline_stages: - - logfmt: - mapping: - app: - - non_indexed_labels: - app: -``` - -New configuration example: - -```yaml - pipeline_stages: - - logfmt: - mapping: - app: - - structured_metadata: - app: -``` - -## v0.35 - -### Breaking change: Jaeger remote sampling no longer configurable using the Jaeger receiver - -Jaeger remote sampling used to be configured using the Jaeger receiver configuration. This receiver was updated to a new version, where support for remote sampling in the receiver was removed. - -Jaeger remote sampling is available as a separate configuration field starting in v0.35.3. - -Old configuration example: - -```yaml -receivers: - jaeger: - protocols: - grpc: - remote_sampling: - strategy_file: /etc/agent/strategies.json - strategy_file_reload_interval: 1s -``` - -New configuration example: - -```yaml -jaeger_remote_sampling: - - source: - file: /etc/agent/strategies.json - reload_interval: 1s -``` - -### Breaking change: `auth` and `version` attributes from `walk_params` block of SNMP integration have been removed - -The SNMP integrations (both v1 and v2) wrap a new version of SNMP exporter which introduces a new configuration file format. -This new format separates the walk and metric mappings from the connection and authentication settings. This allows for easier configuration of different -auth params without having to duplicate the full walk and metric mapping. - -Old configuration example: - -```yaml - snmp_targets: - - name: network_switch_1 - address: 192.168.1.2 - module: if_mib - walk_params: public - auth: public - walk_params: - public: - retries: 2 - version: 2 - auth: - community: public -``` - -New configuration example: - -```yaml - snmp_targets: - - name: network_switch_1 - address: 192.168.1.2 - module: if_mib - walk_params: public - auth: public - walk_params: - public: - retries: 2 -``` - -See [Module and Auth Split Migration](https://github.com/prometheus/snmp_exporter/blob/main/auth-split-migration.md) for more details. - -### Removal of Dynamic Configuration - -The experimental feature Dynamic Configuration has been removed. The use case of dynamic configuration will be replaced -with [Modules][] in Grafana Agent Flow. - -### Breaking change: Removed and renamed tracing metrics - -In the traces subsystem for Static mode some metrics are removed and others are renamed. -The reason for the removal is a bug which caused the metrics to be incorrect if more than one instance of a traces configuration is specified. 
-
-Removed metrics:
-- "blackbox_exporter_config_last_reload_success_timestamp_seconds" (gauge)
-- "blackbox_exporter_config_last_reload_successful" (gauge)
-- "blackbox_module_unknown_total" (counter)
-- "traces_processor_tail_sampling_count_traces_sampled" (counter)
-- "traces_processor_tail_sampling_new_trace_id_received" (counter)
-- "traces_processor_tail_sampling_sampling_decision_latency" (histogram)
-- "traces_processor_tail_sampling_sampling_decision_timer_latency" (histogram)
-- "traces_processor_tail_sampling_sampling_policy_evaluation_error" (counter)
-- "traces_processor_tail_sampling_sampling_trace_dropped_too_early" (counter)
-- "traces_processor_tail_sampling_sampling_traces_on_memory" (gauge)
-- "traces_receiver_accepted_spans" (counter)
-- "traces_receiver_refused_spans" (counter)
-- "traces_exporter_enqueue_failed_log_records" (counter)
-- "traces_exporter_enqueue_failed_metric_points" (counter)
-- "traces_exporter_enqueue_failed_spans" (counter)
-- "traces_exporter_queue_capacity" (gauge)
-- "traces_exporter_queue_size" (gauge)
-
-Renamed metrics:
-- "traces_receiver_refused_spans" is renamed to "traces_receiver_refused_spans_total"
-- "traces_receiver_accepted_spans" is renamed to "traces_receiver_accepted_spans_total"
-- "traces_exporter_sent_metric_points" is renamed to "traces_exporter_sent_metric_points_total"
-
-## v0.33
-
-### Symbolic links in Docker containers removed
-
-We've removed the deprecated symbolic links to `/bin/agent*` in Docker
-containers, as planned in v0.31. If you're setting a custom entrypoint,
-use the new binaries that are prefixed with `/bin/grafana*`.
-
-### Deprecation of Dynamic Configuration
-
-[Dynamic Configuration](/docs/agent/v0.33/cookbook/dynamic-configuration/) will be removed in v0.34.
-The use case of dynamic configuration will be replaced with Modules in Grafana Agent Flow.
-
-## v0.32
-
-### Breaking change: `node_exporter` configuration options changed
-
-With the update of the `node_exporter` integration to use v1.5.0, configuration
-options for the `diskstats` collector have changed names:
-
-- `diskstats_ignored_devices` is now `diskstats_device_exclude` in the static
-  mode configuration.
-- `ignored_devices` is now `device_exclude` in the Flow component
-  configuration.
-
-## v0.31.1
-
-### Breaking change: all Windows executables are now zipped
-
-All release Windows `.exe` files are now zipped. Prior to v0.31, only
-`grafana-agent-installer.exe` was unzipped.
-
-This fixes an issue from v0.31.0 where all `.exe` files were accidentally left
-unzipped.
-
-## v0.31
-
-### Breaking change: binary names are now prefixed with `grafana-`
-
-As first announced in v0.29, release binary names are now prefixed with
-`grafana-`:
-
-- `agent` is now `grafana-agent`.
-- `agentctl` is now `grafana-agentctl`.
-
-For the `grafana/agent` Docker container, the entrypoint is now
-`/bin/grafana-agent`. A symbolic link from `/bin/agent` to the new binary has
-been added.
-
-For the `grafana/agentctl` Docker container, the entrypoint is now
-`/bin/grafana-agentctl`. A symbolic link from `/bin/agentctl` to the new binary
-has been added.
-
-These symbolic links will be removed in v0.33. Custom entrypoints must be
-updated prior to v0.33 to use the new binaries before the symbolic links get
-removed.
-
-## v0.30
-
-### Breaking change: `ebpf_exporter` integration removed
-
-The `ebpf_exporter` version bundled in the Agent used [bcc][] to compile eBPF
-programs at runtime.
This made it hard to run successfully, as the
-dynamic linking approach required a compiler, the correct kernel headers, as
-well as an exact match of the libbpf toolchain on the host system. For these
-reasons, we've decided to remove the `ebpf_exporter` integration.
-
-Running the `ebpf_exporter` integration is now deprecated and will result in
-configuration errors. To continue using the same configuration file, remove the
-`ebpf` block.
-
-[bcc]: https://github.com/iovisor/bcc
-
-## v0.29
-
-### Breaking change: JSON-encoded traces from OTLP versions below 0.16.0 are no longer supported
-
-Grafana Agent's OpenTelemetry Collector dependency has been updated from
-v0.55.0 to v0.61.0. OpenTelemetry Collector v0.58.0 [no longer
-translates][translation-removal] from InstrumentationLibrary to Scope.
-
-This means that JSON-encoded traces that still use InstrumentationLibrary will
-be dropped. To work around this issue, either send traces using protobuf or
-update your OTLP protocol version to v0.16.0 or newer.
-
-[translation-removal]: https://github.com/open-telemetry/opentelemetry-collector/pull/5819
-
-### Deprecation: binary names will be prefixed with `grafana-` in v0.31.0
-
-The binary names `agent` and `agentctl` have been deprecated
-and will be renamed to `grafana-agent` and `grafana-agentctl` respectively in
-the v0.31.0 release.
-
-As part of this change, the Docker containers for the v0.31.0 release will
-include symbolic links from the old binary names to the new binary names.
-
-There is no action to take at this time.
-
-## v0.26
-
-### Breaking change: Deprecated YAML fields in `server` block removed
-
-The YAML fields which were first [deprecated in the v0.24.0
-release](#deprecation-on-yaml-fields-in-server-block-that-have-flags) have now
-been removed, replaced by equivalent command line flags. Refer to the
-original deprecation notice for instructions on how to migrate to the command
-line flags.
-
-### Breaking change: Reconcile sampling policies between Agent and OTel
-
-Configuring sampling policies in the `tail_sampling` block of the `traces`
-block has been changed to match the upstream configuration of the OTel
-processor. It now requires that the policy `type` is specified.
-
-Old configuration:
-
-```yaml
-traces:
-  configs:
-  - name: default
-    ...
-    tail_sampling:
-      policies:
-      - latency:
-          threshold_ms: 100
-```
-
-New configuration:
-
-```yaml
-traces:
-  configs:
-  - name: default
-    ...
-    tail_sampling:
-      policies:
-      - type: latency
-        latency:
-          threshold_ms: 100
-```
-
-## v0.24
-
-### Breaking change: Integrations renamed when `integrations-next` feature flag is used
-
-This change only applies to users utilizing the `integrations-next` feature
-flag. Nothing is changed for configuring integrations when the feature flag is
-not used.
-
-Most `integrations-next` integrations have been renamed to describe what
-telemetry data they generate instead of the projects they are powered by.
-
-* `consul_exporter` is now `consul`
-* `dnsmasq_exporter` is now `dnsmasq`
-* `elasticsearch_exporter` is now `elasticsearch`
-* `github_exporter` is now `github`
-* `kafka_exporter` is now `kafka`
-* `memcached_exporter` is now `memcached`
-* `mongodb_exporter` is now `mongodb`
-* `mysqld_exporter` is now `mysql`
-  * Note that it is `mysql` and _not_ `mysqld`
-* `postgres_exporter` is now `postgres`
-* `process_exporter` is now `process`
-* `redis_exporter` is now `redis`
-* `statsd_exporter` is now `statsd`
-* `windows_exporter` is now `windows`
-
-Keys in the `integrations` config block have changed to match the above:
-
-* `integrations.consul_exporter_configs` is now `integrations.consul_configs`
-* `integrations.dnsmasq_exporter_configs` is now `integrations.dnsmasq_configs`
-* `integrations.elasticsearch_exporter_configs` is now `integrations.elasticsearch_configs`
-* `integrations.github_exporter_configs` is now `integrations.github_configs`
-* `integrations.kafka_exporter_configs` is now `integrations.kafka_configs`
-* `integrations.memcached_exporter_configs` is now `integrations.memcached_configs`
-* `integrations.mongodb_exporter_configs` is now `integrations.mongodb_configs`
-* `integrations.mysqld_exporter_configs` is now `integrations.mysql_configs`
-* `integrations.postgres_exporter_configs` is now `integrations.postgres_configs`
-* `integrations.process_exporter` is now `integrations.process`
-* `integrations.redis_exporter_configs` is now `integrations.redis_configs`
-* `integrations.statsd_exporter` is now `integrations.statsd`
-* `integrations.windows_exporter` is now `integrations.windows`
-
-Integrations not listed here have not changed; `node_exporter` still has the
-same name.
-
-This change propagates to the label values generated by these integrations. For
-example, `job="integrations/redis_exporter"` will now be `job="redis"`.
-
-### Change: Separating YAML and command line flags
-
-As of this release, we are starting to separate what can be configured within
-the YAML file, and what can be configured by command line flag. Previously,
-there was a lot of overlap: many things could be set by both command line flag
-and configuration file, with command line flags taking precedence.
-
-The configuration file will be used for settings that can be updated at runtime
-using the `/-/reload` endpoint or sending SIGHUP. Meanwhile, command line flags
-will be used for settings that must remain consistent throughout the process
-lifetime, such as the HTTP listen port.
-
-This conceptual change will require a number of breaking changes. This
-release focuses on the `server` block of the YAML, which has historically
-caused the most issues with the `/-/reload` endpoint working correctly.
-
-There may be more breaking changes in the future as we identify more settings
-that must be static and moved to flags. These changes will either be moving a
-YAML field to a flag or moving a flag to a YAML field. After we are done with
-this migration, there will be no overlap between flags and the YAML file.
-
-### Deprecation on YAML fields in `server` block that have flags
-
-The `server` block is the most impacted by the separation of flags/fields.
-Instead of making a breaking change immediately, we are deprecating these
-fields.
-
-> **NOTE**: These deprecated fields will be removed in the v0.26.0 release. We
-> will communicate when other deprecated features will be removed once a
-> timeline is established.
-
-The following fields are now deprecated in favor of command line flags:
-
-* `server.register_instrumentation`
-* `server.graceful_shutdown_timeout`
-* `server.log_source_ips_enabled`
-* `server.log_source_ips_header`
-* `server.log_source_ips_regex`
-* `server.http_listen_network`
-* `server.http_listen_address`
-* `server.http_listen_port`
-* `server.http_listen_conn_limit`
-* `server.http_server_read_timeout`
-* `server.http_server_write_timeout`
-* `server.http_server_idle_timeout`
-* `server.grpc_listen_network`
-* `server.grpc_listen_address`
-* `server.grpc_listen_port`
-* `server.grpc_listen_conn_limit`
-* `server.grpc_server_max_recv_msg_size`
-* `server.grpc_server_max_send_msg_size`
-* `server.grpc_server_max_concurrent_streams`
-* `server.grpc_server_max_connection_idle`
-* `server.grpc_server_max_connection_age`
-* `server.grpc_server_max_connection_age_grace`
-* `server.grpc_server_keepalive_time`
-* `server.grpc_server_keepalive_timeout`
-* `server.grpc_server_min_time_between_pings`
-* `server.grpc_server_ping_without_stream_allowed`
-
-This is most of the fields; the remaining non-deprecated fields are
-`server.log_level`, `server.log_format`, `server.http_tls_config`, and
-`server.grpc_tls_config`, which support dynamic updating.
-
-### Breaking change: Removing support for dynamically updating deprecated server fields
-
-`/-/reload` will now fail if any of the deprecated server block fields have
-changed. It is still valid to change a non-deprecated field (for example,
-changing the log level).
-
-### Breaking change: Server-specific command line flags have changed
-
-The following flags are _new_:
-
-* `-server.http.enable-tls`
-* `-server.grpc.enable-tls`
-* `-server.http.address`
-* `-server.grpc.address`
-
-The following flags have been _removed_:
-
-* `-log.level` (replacement: use YAML field `server.log_level`)
-* `-log.format` (replacement: use YAML field `server.log_format`)
-* `-server.http-tls-cert-path` (replacement: use YAML field `server.http_tls_config`)
-* `-server.http-tls-key-path` (replacement: use YAML field `server.http_tls_config`)
-* `-server.http-tls-client-auth` (replacement: use YAML field `server.http_tls_config`)
-* `-server.http-tls-ca-path` (replacement: use YAML field `server.http_tls_config`)
-* `-server.grpc-tls-cert-path` (replacement: use YAML field `server.grpc_tls_config`)
-* `-server.grpc-tls-key-path` (replacement: use YAML field `server.grpc_tls_config`)
-* `-server.grpc-tls-client-auth` (replacement: use YAML field `server.grpc_tls_config`)
-* `-server.grpc-tls-ca-path` (replacement: use YAML field `server.grpc_tls_config`)
-* `-server.http-listen-address` (replacement: use the new `-server.http.address` flag, which combines host and port)
-* `-server.http-listen-port` (replacement: use the new `-server.http.address` flag, which combines host and port)
-* `-server.grpc-listen-address` (replacement: use the new `-server.grpc.address` flag, which combines host and port)
-* `-server.grpc-listen-port` (replacement: use the new `-server.grpc.address` flag, which combines host and port)
-* `-server.path-prefix` (no replacement; this flag was unsupported and caused undefined behavior when set)
-
-The following flags have been _renamed_:
-
-* `-server.log-source-ips-enabled` has been renamed to `-server.log.source-ips.enabled`
-* `-server.log-source-ips-header` has been renamed to `-server.log.source-ips.header`
-* `-server.log-source-ips-regex` has been renamed to `-server.log.source-ips.regex`
-* `-server.http-listen-network` has been renamed
to `-server.http.network`
-* `-server.http-conn-limit` has been renamed to `-server.http.conn-limit`
-* `-server.http-read-timeout` has been renamed to `-server.http.read-timeout`
-* `-server.http-write-timeout` has been renamed to `-server.http.write-timeout`
-* `-server.http-idle-timeout` has been renamed to `-server.http.idle-timeout`
-* `-server.grpc-listen-network` has been renamed to `-server.grpc.network`
-* `-server.grpc-conn-limit` has been renamed to `-server.grpc.conn-limit`
-* `-server.grpc-max-recv-msg-size-bytes` has been renamed to `-server.grpc.max-recv-msg-size-bytes`
-* `-server.grpc-max-send-msg-size-bytes` has been renamed to `-server.grpc.max-send-msg-size-bytes`
-* `-server.grpc-max-concurrent-streams` has been renamed to `-server.grpc.max-concurrent-streams`
-
-### Breaking change: New TLS flags required for enabling TLS
-
-The two new flags, `-server.http.enable-tls` and `-server.grpc.enable-tls`,
-must now be provided for TLS support to be enabled.
-
-This is a change from the previous behavior, where TLS was automatically enabled
-when a certificate pair was provided.
-
-### Breaking change: Default HTTP/gRPC address changes
-
-The HTTP and gRPC listen addresses now default to `127.0.0.1:12345` and
-`127.0.0.1:12346` respectively.
-
-If running inside a container, you must change these to `0.0.0.0` to
-externally communicate with the agent's HTTP server.
-
-The listen addresses may be changed via `-server.http.address` and
-`-server.grpc.address` respectively.
-
-### Breaking change: Removal of `-reload-addr` and `-reload-port` flags
-
-The `-reload-addr` and `-reload-port` flags have been removed. They were
-initially added to work around an issue where reloading a changed server block
-would cause the primary HTTP server to restart. As the HTTP server settings are
-now static, this can no longer happen, and as such the flags have been removed.
-
-### Change: In-memory autoscrape for integrations-next
-
-This change is only relevant to those using the `integrations-next` feature flag.
-
-In-memory connections will now be used for autoscraping-enabled integrations.
-This is a change from the previous behavior, where autoscraping integrations
-would connect to themselves over the network. As a result of this change, the
-`integrations.client_config` field is no longer necessary and has been removed.
-
-## v0.22
-
-### `node_exporter` integration deprecated field names
-
-The following field names for the `node_exporter` integration are now deprecated:
-
-* `netdev_device_whitelist` is deprecated in favor of `netdev_device_include`.
-* `netdev_device_blacklist` is deprecated in favor of `netdev_device_exclude`.
-* `systemd_unit_whitelist` is deprecated in favor of `systemd_unit_include`.
-* `systemd_unit_blacklist` is deprecated in favor of `systemd_unit_exclude`.
-* `filesystem_ignored_mount_points` is deprecated in favor of
-  `filesystem_mount_points_exclude`.
-* `filesystem_ignored_fs_types` is deprecated in favor of
-  `filesystem_fs_types_exclude`.
-
-This change aligns with the equivalent flag names also being deprecated in the
-upstream node_exporter.
-
-Support for the old field names will be removed in a future version. A warning
-will be logged if using the old field names when the integration is enabled.
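-
-For example, a `node_exporter` integration config using the new field names
-might look like the following sketch (the regular expressions are illustrative
-values, not defaults):
-
-```yaml
-integrations:
-  node_exporter:
-    enabled: true
-    netdev_device_exclude: "^lo$"
-    filesystem_fs_types_exclude: "^(tmpfs|overlay)$"
-```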
-
-## v0.21.2, v0.20.1
-
-### Disabling of config retrieval endpoints
-
-These two patch releases, as part of a fix for
-[CVE-2021-41090](https://github.com/grafana/agent/security/advisories/GHSA-9c4x-5hgq-q3wh),
-disable the `/-/config` and `/agent/api/v1/configs/{name}` endpoints by
-default. Pass the `--config.enable-read-api` flag at the command line to
-re-enable them.
-
-## v0.21
-
-### Integrations: Change in how instance labels are handled (Breaking change)
-
-Integrations will now use a SUO-specific `instance` label value. Integrations
-that apply to a whole machine or agent will continue to use `:`, but integrations that connect to an external
-system will now infer an appropriate value based on the config for that specific
-integration. Refer to the documentation for each integration to see which
-defaults are used.
-
-*Note:* In some cases, a default value for `instance` cannot be inferred. This
-is the case for mongodb_exporter and postgres_exporter if more than one SUO is
-being connected to. In these cases, the instance value can be manually set by
-configuring the `instance` field on the integration. This can also be useful if
-two agents infer the same value for instance for the same integration.
-
-As part of this change, the `agent_hostname` label is permanently affixed to
-self-scraped integrations and cannot be disabled. This disambiguates multiple
-agents using the same instance label for an integration, and allows users to
-identify which agents need to be updated with an override for `instance`.
-
-Both `use_hostname_label` and `replace_instance_label` are now deprecated
-and ignored from the YAML file, permanently treated as true. A future release
-will remove these fields, causing YAML errors on load instead of being silently
-ignored.
-
-## v0.20
-
-### Traces: Changes to receiver's TLS config (Breaking change)
-
-Upgrading to OpenTelemetry v0.36.0 includes a change in the receivers' TLS config.
-TLS params have been changed from being squashed to being in their own block.
-This affects the jaeger receiver's `remote_sampling` config.
-
-Example old config:
-
-```yaml
-receivers:
-  jaeger:
-    protocols:
-      grpc: null
-    remote_sampling:
-      strategy_file:
-      insecure: true
-```
-
-Example new config:
-
-```yaml
-receivers:
-  jaeger:
-    protocols:
-      grpc: null
-    remote_sampling:
-      strategy_file:
-      tls:
-        insecure: true
-```
-
-### Traces: push_config is no longer supported (Breaking change)
-
-`push_config` was deprecated in favor of `remote_write` in v0.14.0, while
-maintaining backwards compatibility.
-Refer to the [deprecation announcement](#tempo-push_config-deprecation) for how to upgrade.
-
-### Traces: legacy OTLP gRPC port no longer default port
-
-OTLP gRPC receivers listen at port `4317` by default, instead of at port `55680`.
-This is in line with the OTLP legacy port deprecation.
-
-To upgrade, point the client instrumentation push endpoint to `:4317` if using
-the default OTLP gRPC endpoint.
-
-## v0.19
-
-### Traces: Deprecation of "tempo" in config and metrics. (Deprecation)
-
-The term `tempo` in the config has been deprecated in favor of `traces`. This
-change is to make the intent clearer.
-
-Example old config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    receivers:
-      jaeger:
-        protocols:
-          thrift_http:
-```
-
-Example new config:
-
-```yaml
-traces:
-  configs:
-  - name: default
-    receivers:
-      jaeger:
-        protocols:
-          thrift_http:
-```
-
-Any tempo metrics have been renamed from `tempo_*` to `traces_*`.
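-
-Dashboards and alerts that query the old metric names need a matching update.
-For example, a hypothetical panel query would change from:
-
-```
-rate(tempo_receiver_accepted_spans[5m])
-```
-
-to:
-
-```
-rate(traces_receiver_accepted_spans[5m])
-```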
-
-
-### Tempo: split grouping by trace from tail sampling config (Breaking change)
-
-Load balancing traces between agent instances has been moved from an embedded
-functionality in tail sampling to its own configuration block.
-This was done because more processors, such as service graphs, benefit from
-consistently receiving all spans for a trace in the same agent.
-
-As a consequence, `tail_sampling.load_balancing` has been deprecated in favor of
-a `load_balancing` block. Also, `port` has been renamed to `receiver_port` and
-moved to the new `load_balancing` block.
-
-Example old config:
-
-```yaml
-tail_sampling:
-  policies:
-  - always_sample:
-  port: 4318
-  load_balancing:
-    exporter:
-      insecure: true
-    resolver:
-      dns:
-        hostname: agent
-        port: 4318
-```
-
-Example new config:
-
-```yaml
-tail_sampling:
-  policies:
-  - always_sample:
-load_balancing:
-  exporter:
-    insecure: true
-  resolver:
-    dns:
-      hostname: agent
-      port: 4318
-  receiver_port: 4318
-```
-
-### Metrics: Deprecation of "prometheus" in config. (Deprecation)
-
-The term `prometheus` in the config has been deprecated in favor of `metrics`. This
-change is to make it clearer when referring to Prometheus or another
-Prometheus-like database, and the configuration of Grafana Agent to send metrics
-to one of those systems.
-
-Old configs will continue to work for now, but support for the old format will
-eventually be removed. To migrate your config, change the `prometheus` key to
-`metrics`.
-
-Example old config:
-
-```yaml
-prometheus:
-  configs:
-  - name: default
-    host_filter: false
-    scrape_configs:
-    - job_name: local_scrape
-      static_configs:
-      - targets: ['127.0.0.1:12345']
-        labels:
-          cluster: 'localhost'
-    remote_write:
-    - url: http://localhost:9009/api/prom/push
-```
-
-Example new config:
-
-```yaml
-metrics:
-  configs:
-  - name: default
-    host_filter: false
-    scrape_configs:
-    - job_name: local_scrape
-      static_configs:
-      - targets: ['127.0.0.1:12345']
-        labels:
-          cluster: 'localhost'
-    remote_write:
-    - url: http://localhost:9009/api/prom/push
-```
-
-### Tempo: prom_instance rename (Breaking change)
-
-As part of `prometheus` being renamed to `metrics`, the spanmetrics
-`prom_instance` field has been renamed to `metrics_instance`. This is a breaking
-change, and the old name will no longer work.
-
-Example old config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    spanmetrics:
-      prom_instance: default
-```
-
-Example new config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    spanmetrics:
-      metrics_instance: default
-```
-
-### Logs: Deprecation of "loki" in config. (Deprecation)
-
-The term `loki` in the config has been deprecated in favor of `logs`. This
-change is to make it clearer when referring to Grafana Loki, and the
-configuration of Grafana Agent to send logs to Grafana Loki.
-
-Old configs will continue to work for now, but support for the old format will
-eventually be removed. To migrate your config, change the `loki` key to `logs`.
-
-Example old config:
-
-```yaml
-loki:
-  positions_directory: /tmp/loki-positions
-  configs:
-  - name: default
-    clients:
-    - url: http://localhost:3100/loki/api/v1/push
-    scrape_configs:
-    - job_name: system
-      static_configs:
-      - targets: ['localhost']
-        labels:
-          job: varlogs
-          __path__: /var/log/*log
-```
-
-Example new config:
-
-```yaml
-logs:
-  positions_directory: /tmp/loki-positions
-  configs:
-  - name: default
-    clients:
-    - url: http://localhost:3100/loki/api/v1/push
-    scrape_configs:
-    - job_name: system
-      static_configs:
-      - targets: ['localhost']
-        labels:
-          job: varlogs
-          __path__: /var/log/*log
-```
-
-### Tempo: Deprecation of "loki" in config. (Deprecation)
-
-As part of the `loki` to `logs` rename, parts of the automatic_logging component
-in Tempo have been updated to refer to `logs_instance` instead.
-
-Old configurations using `loki_name`, `loki_tag`, or `backend: loki` will
-continue to work as of this version, but support for the old config format
-will eventually be removed.
-
-Example old config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    automatic_logging:
-      backend: loki
-      loki_name: default
-      spans: true
-      processes: true
-      roots: true
-      overrides:
-        loki_tag: tempo
-```
-
-Example new config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    automatic_logging:
-      backend: logs_instance
-      logs_instance_name: default
-      spans: true
-      processes: true
-      roots: true
-      overrides:
-        logs_instance_tag: tempo
-```
-
-## v0.18
-
-### Tempo: Remote write TLS config
-
-Tempo `remote_write` now supports configuring TLS settings in the trace
-exporter's client. `insecure_skip_verify` is moved into this setting's block.
-
-Old configurations with `insecure_skip_verify` outside `tls_config` will continue
-to work as of this version, but support will eventually be removed.
-If both `insecure_skip_verify` and `tls_config.insecure_skip_verify` are used,
-then the latter takes precedence.
-
-Example old config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    remote_write:
-    - endpoint: otel-collector:55680
-      insecure: true
-      insecure_skip_verify: true
-```
-
-Example new config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    remote_write:
-    - endpoint: otel-collector:55680
-      insecure: true
-      tls_config:
-        insecure_skip_verify: true
-```
-
-## v0.15
-
-### Tempo: `automatic_logging` changes
-
-Tempo automatic logging previously assumed that the operator wanted to log
-to a Loki instance. With the addition of an option to log to stdout, a new
-field is required to maintain the old behavior.
-
-Example old config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    automatic_logging:
-      loki_name:
-```
-
-Example new config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    automatic_logging:
-      backend: loki
-      loki_name:
-```
-
-## v0.14
-
-### Scraping Service security change
-
-v0.14.0 changes the default behavior of the scraping service config management
-API to reject all configuration files that read credentials from a file on disk.
-This prevents malicious users from crafting an instance config file that reads
-arbitrary files on disk and sends their contents to remote endpoints.
-
-To revert to the old behavior, add `dangerous_allow_reading_files: true` in your
-`scraping_service` config.
-
-Example old config:
-
-```yaml
-prometheus:
-  scraping_service:
-    # ...
-```
-
-Example new config:
-
-```yaml
-prometheus:
-  scraping_service:
-    dangerous_allow_reading_files: true
-    # ...
-```
-
-### SigV4 config change
-
-v0.14.0 updates the internal Prometheus dependency to 2.26.0, which includes
-native support for SigV4, but uses a slightly different configuration structure
-than the Grafana Agent did.
-
-To migrate, remove the `enabled` key from your `sigv4` configs. If `enabled` was
-the only key, define sigv4 as an empty object: `sigv4: {}`.
-
-Example old config:
-
-```yaml
-sigv4:
-  enabled: true
-  region: us-east-1
-```
-
-Example new config:
-
-```yaml
-sigv4:
-  region: us-east-1
-```
-
-### Tempo: `push_config` deprecation
-
-`push_config` is now deprecated in favor of a `remote_write` array which allows for sending spans to multiple endpoints.
-`push_config` will be removed in a future release, and it is recommended to migrate to `remote_write` as soon as possible.
-
-To migrate, move the batch options outside the `push_config` block.
-Then, add a `remote_write` array and move the remainder of your `push_config` block inside it.
-
-Example old config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    receivers:
-      otlp:
-        protocols:
-          grpc:
-    push_config:
-      endpoint: otel-collector:55680
-      insecure: true
-      batch:
-        timeout: 5s
-        send_batch_size: 100
-```
-
-Example migrated config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    receivers:
-      otlp:
-        protocols:
-          grpc:
-    remote_write:
-    - endpoint: otel-collector:55680
-      insecure: true
-    batch:
-      timeout: 5s
-      send_batch_size: 100
-```
-
-
-## v0.12
-
-v0.12 had two breaking changes: the `tempo` and `loki` sections have been changed to require a list of `tempo`/`loki` configs rather than just one.
-
-### Tempo Config Change
-
-The Tempo config (`tempo` in the config file) has been changed to store
-configs within a `configs` list. This allows for defining multiple Tempo
-instances for collecting traces and forwarding them to different OTLP
-endpoints.
-
-To migrate, add a `configs:` array and move your existing config inside of it.
-Give the element a `name: default` field.
-
-Each config must have a unique non-empty name. `default` is recommended for users
-who don't have other configs. The name of the config will be added as a
-`tempo_config` label for metrics.
-
-Example old config:
-
-```yaml
-tempo:
-  receivers:
-    jaeger:
-      protocols:
-        thrift_http:
-  attributes:
-    actions:
-    - action: upsert
-      key: env
-      value: prod
-  push_config:
-    endpoint: otel-collector:55680
-    insecure: true
-    batch:
-      timeout: 5s
-      send_batch_size: 100
-```
-
-Example migrated config:
-
-```yaml
-tempo:
-  configs:
-  - name: default
-    receivers:
-      jaeger:
-        protocols:
-          thrift_http:
-    attributes:
-      actions:
-      - action: upsert
-        key: env
-        value: prod
-    push_config:
-      endpoint: otel-collector:55680
-      insecure: true
-      batch:
-        timeout: 5s
-        send_batch_size: 100
-```
-
-### Loki Promtail Config Change
-
-The Loki Promtail config (`loki` in the config file) has been changed to store
-configs within a `configs` list. This allows for defining multiple Loki
-Promtail instances for collecting logs and forwarding them to different Loki
-servers.
-
-To migrate, add a `configs:` array and move your existing config inside of it.
-Give the element a `name: default` field.
-
-Each config must have a unique non-empty name. `default` is recommended for users
-who don't have other configs. The name of the config will be added as a
-`loki_config` label for Loki Promtail metrics.
-
-Example old config:
-
-```yaml
-loki:
-  positions:
-    filename: /tmp/positions.yaml
-  clients:
-  - url: http://loki:3100/loki/api/v1/push
-  scrape_configs:
-  - job_name: system
-    static_configs:
-    - targets:
-      - localhost
-      labels:
-        job: varlogs
-        __path__: /var/log/*log
-```
-
-Example migrated config:
-
-```yaml
-loki:
-  configs:
-  - name: default
-    positions:
-      filename: /tmp/positions.yaml
-    clients:
-    - url: http://loki:3100/loki/api/v1/push
-    scrape_configs:
-    - job_name: system
-      static_configs:
-      - targets:
-        - localhost
-        labels:
-          job: varlogs
-          __path__: /var/log/*log
-```
diff --git a/docs/sources/static/set-up/_index.md b/docs/sources/static/set-up/_index.md
deleted file mode 100644
index 93fb9171bf..0000000000
--- a/docs/sources/static/set-up/_index.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-aliases:
-- ../set-up/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/
-- /docs/grafana-cloud/send-data/agent/static/set-up/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/
-description: Learn how to set up Grafana Agent in static mode
-menuTitle: Set up static mode
-title: Set up Grafana Agent in static mode
-weight: 100
----
-
-# Set up Grafana Agent in static mode
-
-This section includes information that helps you get Grafana Agent in static mode installed and configured.
-
-{{< section >}}
diff --git a/docs/sources/static/set-up/deploy-agent.md b/docs/sources/static/set-up/deploy-agent.md
deleted file mode 100644
index 5325d3b71c..0000000000
--- a/docs/sources/static/set-up/deploy-agent.md
+++ /dev/null
@@ -1,393 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/deploy-agent/
-- /docs/grafana-cloud/send-data/agent/static/set-up/deploy-agent/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/deploy-agent/
-description: Learn how to deploy Grafana Agent in different topologies
-menuTitle: Deploy static mode
-title: Deploy Grafana Agent in static mode
-weight: 300
----
-
-{{< docs/shared source="agent" lookup="/deploy-agent.md" version="" >}}
-
-## For scalable ingestion of traces
-
-For small workloads, it is normal to have just one Agent handle all incoming spans with no need for load balancing.
-However, for large workloads it is desirable to spread out the load of processing spans over multiple Agent instances.
-
-To scale the Agent for trace ingestion, do the following:
-1. Set up the `load_balancing` section of the Agent's `traces` config.
-2. Start multiple Agent instances, all with the same configuration, so that:
-   * Each Agent load balances using the same strategy.
-   * Each Agent processes spans in the same way.
-3. The cluster of Agents is now set up for load balancing. It works as follows:
-   1. Any of the Agents can receive spans from instrumented applications via the configured `receivers`.
-   2. When an Agent first receives spans, it forwards them to any of the Agents in the cluster according to the `load_balancing` configuration.
-
-### tail_sampling
-
-If some of the spans for a trace end up in a different Agent, `tail_sampling` will not sample correctly.
-Enabling `load_balancing` is necessary if `tail_sampling` is enabled and there could be more than one Agent instance processing spans for the same trace.
-`load_balancing` will make sure that all spans of a given trace are processed by the same Agent instance.
-
-### spanmetrics
-
-All spans for a given `service.name` must be processed by the same `spanmetrics` Agent.
-To make sure that this is the case, set up `load_balancing` with `routing_key: service`.
-
-### service_graphs
-
-It is challenging to scale `service_graphs` over multiple Agent instances.
-* For `service_graphs` to work correctly, each "client" span must be paired
-  with a "server" span in order to calculate metrics such as span duration.
-* If a "client" span goes to one Agent, but a "server" span goes to another Agent,
-  then no single Agent will be able to pair the spans and a metric won't be generated.
-
-`load_balancing` can solve this problem partially if it is configured with `routing_key: traceID`.
-  * Each Agent will then be able to calculate a service graph for each "client"/"server" pair in a trace.
-  * However, it is possible to have a span with similar "server"/"client" values
-    in a different trace, processed by another Agent.
-  * If two different Agents process similar "server"/"client" spans,
-    they will generate the same service graph metric series.
-  * If the series from two Agents are the same, this will lead to issues
-    when writing them to the backend database.
-    * Users could differentiate the series by adding a label such as `"agent_id"`.
-    * Unfortunately, there is currently no method in the Agent to aggregate those series from different Agents and merge them into one series.
-    * A PromQL query could be used to aggregate the metrics from different Agents.
-    * If the metrics are stored in Grafana Mimir, cardinality issues due to `"agent_id"` labels can be solved using [Adaptive Metrics][adaptive-metrics].
-
-A simpler, more scalable alternative to generating service graph metrics in the Agent is to generate them entirely in the backend database.
-For example, service graphs can be [generated][tempo-servicegraphs] in Grafana Cloud by the Tempo traces database.
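-
-A minimal `load_balancing` block for the cases above could look like the
-following sketch; use `routing_key: service` for `spanmetrics`, or
-`routing_key: traceID` for `tail_sampling` and `service_graphs` (the hostname
-is a placeholder; the Kubernetes examples below show complete configs):
-
-```yaml
-traces:
-  configs:
-  - name: default
-    load_balancing:
-      routing_key: service # or traceID
-      receiver_port: 34621
-      exporter:
-        insecure: true
-      resolver:
-        dns:
-          hostname: agent-traces-headless.example.svc.cluster.local
-          port: 34621
-```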
- -[tempo-servicegraphs]: https://grafana.com/docs/tempo/latest/metrics-generator/service_graphs/ -[adaptive-metrics]: https://grafana.com/docs/grafana-cloud/cost-management-and-billing/reduce-costs/metrics-costs/control-metrics-usage-via-adaptive-metrics/ - -### Example Kubernetes configuration -{{< collapse title="Example Kubernetes configuration with DNS load balancing" >}} -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: grafana-cloud-monitoring ---- -apiVersion: v1 -kind: Service -metadata: - name: agent-traces - namespace: grafana-cloud-monitoring -spec: - ports: - - name: agent-traces-otlp-grpc - port: 9411 - protocol: TCP - targetPort: 9411 - selector: - name: agent-traces ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: k6-trace-generator - namespace: grafana-cloud-monitoring -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 1 - selector: - matchLabels: - name: k6-trace-generator - template: - metadata: - labels: - name: k6-trace-generator - spec: - containers: - - env: - - name: ENDPOINT - value: agent-traces-headless.grafana-cloud-monitoring.svc.cluster.local:9411 - image: ghcr.io/grafana/xk6-client-tracing:v0.0.2 - imagePullPolicy: IfNotPresent - name: k6-trace-generator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: agent-traces - namespace: grafana-cloud-monitoring -spec: - minReadySeconds: 10 - replicas: 3 - revisionHistoryLimit: 1 - selector: - matchLabels: - name: agent-traces - template: - metadata: - labels: - name: agent-traces - spec: - containers: - - args: - - -config.file=/etc/agent/agent.yaml - command: - - /bin/grafana-agent - image: grafana/agent:v0.38.0 - imagePullPolicy: IfNotPresent - name: agent-traces - ports: - - containerPort: 9411 - name: otlp-grpc - protocol: TCP - - containerPort: 34621 - name: agent-lb - protocol: TCP - volumeMounts: - - mountPath: /etc/agent - name: agent-traces - volumes: - - configMap: - name: agent-traces - name: agent-traces ---- -apiVersion: v1 -kind: Service -metadata: - name: agent-traces-headless - namespace: grafana-cloud-monitoring -spec: - clusterIP: None - ports: - - name: agent-lb - port: 34621 - protocol: TCP - targetPort: agent-lb - selector: - name: agent-traces - type: ClusterIP ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: agent-traces - namespace: grafana-cloud-monitoring -data: - agent.yaml: | - traces: - configs: - - name: default - load_balancing: - exporter: - insecure: true - resolver: - dns: - hostname: agent-traces-headless.grafana-cloud-monitoring.svc.cluster.local - port: 34621 - timeout: 5s - interval: 60s - receiver_port: 34621 - receivers: - otlp: - protocols: - grpc: - endpoint: 0.0.0.0:9411 - remote_write: - - basic_auth: - username: 111111 - password: pass - endpoint: tempo-prod-06-prod-gb-south-0.grafana.net:443 - retry_on_failure: - enabled: false -``` -{{< /collapse >}} - -{{< collapse title="Example Kubernetes configuration with Kubernetes load balancing" >}} - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: grafana-cloud-monitoring ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent-traces - namespace: grafana-cloud-monitoring ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: grafana-agent-traces-role - namespace: grafana-cloud-monitoring -rules: -- apiGroups: - - "" - resources: - - endpoints - verbs: - - list - - watch - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: grafana-agent-traces-rolebinding - namespace: 
grafana-cloud-monitoring
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: grafana-agent-traces-role
-subjects:
-- kind: ServiceAccount
-  name: grafana-agent-traces
-  namespace: grafana-cloud-monitoring
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: agent-traces
-  namespace: grafana-cloud-monitoring
-spec:
-  ports:
-  - name: agent-traces-otlp-grpc
-    port: 9411
-    protocol: TCP
-    targetPort: 9411
-  selector:
-    name: agent-traces
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: k6-trace-generator
-  namespace: grafana-cloud-monitoring
-spec:
-  minReadySeconds: 10
-  replicas: 1
-  revisionHistoryLimit: 1
-  selector:
-    matchLabels:
-      name: k6-trace-generator
-  template:
-    metadata:
-      labels:
-        name: k6-trace-generator
-    spec:
-      containers:
-      - env:
-        - name: ENDPOINT
-          value: agent-traces-headless.grafana-cloud-monitoring.svc.cluster.local:9411
-        image: ghcr.io/grafana/xk6-client-tracing:v0.0.2
-        imagePullPolicy: IfNotPresent
-        name: k6-trace-generator
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: agent-traces
-  namespace: grafana-cloud-monitoring
-spec:
-  minReadySeconds: 10
-  replicas: 3
-  revisionHistoryLimit: 1
-  selector:
-    matchLabels:
-      name: agent-traces
-  template:
-    metadata:
-      labels:
-        name: agent-traces
-    spec:
-      containers:
-      - args:
-        - -config.file=/etc/agent/agent.yaml
-        command:
-        - /bin/grafana-agent
-        image: grafana/agent:v0.38.0
-        imagePullPolicy: IfNotPresent
-        name: agent-traces
-        ports:
-        - containerPort: 9411
-          name: otlp-grpc
-          protocol: TCP
-        - containerPort: 34621
-          name: agent-lb
-          protocol: TCP
-        volumeMounts:
-        - mountPath: /etc/agent
-          name: agent-traces
-      serviceAccount: grafana-agent-traces
-      volumes:
-      - configMap:
-          name: agent-traces
-        name: agent-traces
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: agent-traces-headless
-  namespace: grafana-cloud-monitoring
-spec:
-  clusterIP: None
-  ports:
-  - name: agent-lb
-    port: 34621
-    protocol: TCP
-    targetPort: agent-lb
-  selector:
-    name: agent-traces
-  type: ClusterIP
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: agent-traces
-  namespace: grafana-cloud-monitoring
-data:
-  agent.yaml: |
-    traces:
-      configs:
-      - name: default
-        load_balancing:
-          exporter:
-            insecure: true
-          resolver:
-            kubernetes:
-              service: agent-traces-headless
-              ports:
-              - 34621
-          receiver_port: 34621
-        receivers:
-          otlp:
-            protocols:
-              grpc:
-                endpoint: 0.0.0.0:9411
-        remote_write:
-        - basic_auth:
-            username: 111111
-            password: pass
-          endpoint: tempo-prod-06-prod-gb-south-0.grafana.net:443
-          retry_on_failure:
-            enabled: false
-```
-
-{{< /collapse >}}
-
-You must fill in the correct OTLP credentials before running the above examples.
-You can start the example above by using [k3d][]:
-
-```bash
-k3d cluster create grafana-agent-lb-test
-kubectl apply -f kubernetes_config.yaml
-```
-
-To delete the cluster, run:
-
-```bash
-k3d cluster delete grafana-agent-lb-test
-```
-
-[k3d]: https://k3d.io/v5.6.0/
diff --git a/docs/sources/static/set-up/install/_index.md b/docs/sources/static/set-up/install/_index.md
deleted file mode 100644
index 3e62fdbdf8..0000000000
--- a/docs/sources/static/set-up/install/_index.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-aliases:
-- ../
-- ../set-up/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/
-description: Learn how to install Grafana Agent in static mode
-menuTitle: Install static mode
-title: Install Grafana Agent in static mode
-weight: 100
----
-
-# Install Grafana Agent in static mode
-
-You can install Grafana Agent in static mode on Docker, Kubernetes, Linux, macOS, or Windows.
-
-The following architectures are supported:
-
-- Linux: AMD64, ARM64
-- Windows: AMD64
-- macOS: AMD64 (Intel), ARM64 (Apple Silicon)
-- FreeBSD: AMD64
-
-{{< admonition type="note" >}}
-ppc64le builds are considered secondary release targets and do not have the same level of support and testing as other platforms.
-{{< /admonition >}}
-
-{{< section >}}
-
-{{< admonition type="note" >}}
-Installing Grafana Agent on other operating systems is possible, but is not recommended or supported.
-{{< /admonition >}}
-
-## Grafana Cloud
-
-Use the Grafana Agent [Kubernetes configuration](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/) or follow the instructions for installing Grafana Agent in the [Walkthrough](/docs/grafana-cloud/monitor-infrastructure/integrations/get-started/).
-
-## Data collection
-
-By default, Grafana Agent sends anonymous usage information to Grafana Labs. Refer to [data collection][] for more information
-about what data is collected and how you can opt out.
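-
-For example, a sketch of opting out at startup, assuming your release supports the `-disable-reporting` command-line flag:
-
-```shell
-# Start Grafana Agent in static mode with usage reporting disabled.
-grafana-agent -config.file /etc/grafana-agent.yaml -disable-reporting
-```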
-
-{{% docs/reference %}}
-[data collection]: "/docs/agent/ -> /docs/agent//data-collection.md"
-[data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/data-collection.md"
-{{% /docs/reference %}}
\ No newline at end of file
diff --git a/docs/sources/static/set-up/install/install-agent-binary.md b/docs/sources/static/set-up/install/install-agent-binary.md
deleted file mode 100644
index 8d53d83768..0000000000
--- a/docs/sources/static/set-up/install/install-agent-binary.md
+++ /dev/null
@@ -1,66 +0,0 @@
----
-aliases:
-- ../../set-up/install-agent-binary/
-- ../set-up/install-agent-binary/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-binary/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-binary/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-binary/
-description: Learn how to install Grafana Agent in static mode as a standalone binary
-menuTitle: Standalone
-title: Install Grafana Agent in static mode as a standalone binary
-weight: 700
----
-
-# Install Grafana Agent in static mode as a standalone binary
-
-Grafana Agent is distributed as a standalone binary for the following operating systems and architectures:
-
-* Linux: AMD64, ARM64, PPC64, S390X
-* macOS: AMD64 (Intel), ARM64 (Apple Silicon)
-* Windows: AMD64
-
-{{< admonition type="note" >}}
-ppc64le builds are considered secondary release targets and do not have the same level of support and testing as other platforms.
-{{< /admonition >}}
-
-The binary executable runs Grafana Agent in standalone mode. If you want to run Grafana Agent as a service, refer to the installation instructions for:
-
-* [Linux][linux]
-* [macOS][macos]
-* [Windows][windows]
-
-## Download Grafana Agent
-
-To download Grafana Agent as a standalone binary, perform the following steps.
-
-1. Navigate to the current Grafana Agent [release](https://github.com/grafana/agent/releases) page.
-
-1. Scroll down to the **Assets** section.
-
-1. Download the `grafana-agent` zip file that matches your operating system and machine’s architecture.
-
-1. Extract the package contents into a directory.
-
-1. 
If you are installing Grafana Agent on Linux, macOS, or FreeBSD, run the following command in a terminal:
-
-   ```shell
-   chmod +x EXTRACTED_BINARY
-   ```
-
-## Next steps
-
-* [Start Grafana Agent][start]
-* [Configure Grafana Agent][configure]
-
-{{% docs/reference %}}
-[linux]: "/docs/agent/ -> /docs/agent//static/set-up/install/install-agent-linux"
-[linux]: "/docs/grafana-cloud/ -> ./install-agent-linux"
-[macos]: "/docs/agent/ -> /docs/agent//static/set-up/install/install-agent-macos"
-[macos]: "/docs/grafana-cloud/ -> ./install-agent-macos"
-[windows]: "/docs/agent/ -> /docs/agent//static/set-up/install/install-agent-on-windows"
-[windows]: "/docs/grafana-cloud/ -> ./install-agent-on-windows"
-[start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent#standalone-binary"
-[start]: "/docs/grafana-cloud/ -> ../start-agent#standalone-binary"
-[configure]: "/docs/agent/ -> /docs/agent//static/configuration"
-[configure]: "/docs/grafana-cloud/ -> ../../configuration"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/set-up/install/install-agent-docker.md b/docs/sources/static/set-up/install/install-agent-docker.md
deleted file mode 100644
index bece555966..0000000000
--- a/docs/sources/static/set-up/install/install-agent-docker.md
+++ /dev/null
@@ -1,78 +0,0 @@
----
-aliases:
-- ../../set-up/install-agent-docker/
-- ../set-up/install-agent-docker/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-docker/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-docker/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-docker/
-description: Learn how to run Grafana Agent in static mode in a Docker container
-menuTitle: Docker
-title: Run Grafana Agent in static mode in a Docker container
-weight: 200
----
-
-# Run Grafana Agent in static mode in a Docker container
-
-Grafana Agent is available as a Docker container image on the following platforms:
-
-* [Linux containers][] for AMD64 and ARM64.
-* [Windows containers][] for AMD64.
-
-[Linux containers]: #run-a-linux-docker-container
-[Windows containers]: #run-a-windows-docker-container
-
-## Before you begin
-
-* Install [Docker][] on your computer.
-* Create and save a Grafana Agent YAML [configuration file][configure] on your computer.
-
-[Docker]: https://docker.io
-
-## Run a Linux Docker container
-
-To run a Grafana Agent Docker container on Linux, run the following command in a terminal window:
-
-```shell
-docker run \
-  -v WAL_DATA_DIRECTORY:/etc/agent/data \
-  -v CONFIG_FILE_PATH:/etc/agent/agent.yaml \
-  grafana/agent:{{< param "AGENT_RELEASE" >}}
-```
-
-Replace the following:
-
-* `CONFIG_FILE_PATH`: The configuration file path on your Linux host system.
-* `WAL_DATA_DIRECTORY`: The directory used to store your metrics before sending them to Prometheus.
-
-{{< admonition type="note" >}}
-For the flags to work correctly, you must expose the paths on your Linux host to the Docker container through a bind mount.
-{{< /admonition >}}
-
-## Run a Windows Docker container
-
-To run a Grafana Agent Docker container on Windows, run the following command in a Windows command prompt:
-
-```shell
-docker run ^
-  -v WAL_DATA_DIRECTORY:C:\etc\grafana-agent\data ^
-  -v CONFIG_FILE_PATH:C:\etc\grafana-agent ^
-  grafana/agent:{{< param "AGENT_RELEASE" >}}-windows
-```
-
-Replace the following:
-
-* `CONFIG_FILE_PATH`: The configuration file path on your Windows host system.
-* `WAL_DATA_DIRECTORY`: The directory used to store your metrics before sending them to Prometheus.
Old WAL data is cleaned up every hour and is used for recovery if the process crashes.
-
-{{< admonition type="note" >}}
-For the flags to work correctly, you must expose the paths on your Windows host to the Docker container through a bind mount.
-{{< /admonition >}}
-
-## Next steps
-
-- [Start Grafana Agent][start]
-- [Configure Grafana Agent][configure]
-
-{{% docs/reference %}}
-[start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent"
-[start]: "/docs/grafana-cloud/ -> ../start-agent"
-[configure]: "/docs/agent/ -> /docs/agent//static/configuration/create-config-file"
-[configure]: "/docs/grafana-cloud/ -> ../../configuration/create-config-file"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/set-up/install/install-agent-kubernetes.md b/docs/sources/static/set-up/install/install-agent-kubernetes.md
deleted file mode 100644
index d55a7d9af2..0000000000
--- a/docs/sources/static/set-up/install/install-agent-kubernetes.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-aliases:
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-kubernetes/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-kubernetes/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-kubernetes/
-description: Learn how to deploy Grafana Agent in static mode on Kubernetes
-menuTitle: Kubernetes
-title: Deploy Grafana Agent in static mode on Kubernetes
-weight: 300
----
-
-# Deploy Grafana Agent in static mode on Kubernetes
-
-You can use the Helm chart for Grafana Agent to deploy Grafana Agent in static mode on Kubernetes.
-
-## Before you begin
-
-* Install [Helm][] on your computer.
-* Configure a Kubernetes cluster that you can use for Grafana Agent.
-* Configure your local Kubernetes context to point to the cluster.
-
-[Helm]: https://helm.sh
-
-## Deploy
-
-{{< admonition type="note" >}}
-These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for Grafana Agent.
-You can deploy Grafana Agent in static mode or flow mode. The Helm chart deploys flow mode by default.
-{{< /admonition >}}
-
-To deploy Grafana Agent in static mode on Kubernetes using Helm, run the following commands in a terminal window:
-
-1. Add the Grafana Helm chart repository:
-
-   ```shell
-   helm repo add grafana https://grafana.github.io/helm-charts
-   ```
-
-1. Update the Grafana Helm chart repository:
-
-   ```shell
-   helm repo update
-   ```
-
-1. Install Grafana Agent in static mode:
-
-   ```shell
-   helm install <RELEASE_NAME> grafana/grafana-agent --set agent.mode=static
-   ```
-
-   Replace the following:
-
-   - _`<RELEASE_NAME>`_: The name to use for your Grafana Agent installation, such as `grafana-agent`.
-
-   {{< admonition type="warning" >}}
-   Always pass `--set agent.mode=static` in `helm install` or `helm upgrade` commands to ensure Grafana Agent gets installed in static mode.
-   Alternatively, set `agent.mode` to `static` in your values.yaml file.
-   {{< /admonition >}}
-
-For more information on the Grafana Agent Helm chart, refer to the Helm chart documentation on [Artifact Hub][].
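-
-As a sketch of the values.yaml alternative mentioned in the warning above, pinning the mode in your values file keeps later `helm upgrade` runs from silently reverting to flow mode. The key name follows the chart's `agent` block:
-
-```yaml
-# values.yaml: pin static mode so it survives future upgrades.
-agent:
-  mode: static
-```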
-
-[Artifact Hub]: https://artifacthub.io/packages/helm/grafana/grafana-agent
-
diff --git a/docs/sources/static/set-up/install/install-agent-linux.md b/docs/sources/static/set-up/install/install-agent-linux.md
deleted file mode 100644
index 716a48df2a..0000000000
--- a/docs/sources/static/set-up/install/install-agent-linux.md
+++ /dev/null
@@ -1,225 +0,0 @@
----
-aliases:
-- ../../set-up/install-agent-linux/
-- ../set-up/install-agent-linux/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-linux/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-linux/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-linux/
-description: Learn how to install Grafana Agent in static mode on Linux
-menuTitle: Linux
-title: Install Grafana Agent in static mode on Linux
-weight: 400
----
-
-# Install Grafana Agent in static mode on Linux
-
-You can install Grafana Agent in static mode on Linux.
-
-## Install on Debian or Ubuntu
-
-To install Grafana Agent in static mode on Debian or Ubuntu, run the following commands in a terminal window.
-
-1. Import the GPG key and add the Grafana package repository:
-
-   ```shell
-   sudo mkdir -p /etc/apt/keyrings/
-   wget -q -O - https://apt.grafana.com/gpg.key | gpg --dearmor | sudo tee /etc/apt/keyrings/grafana.gpg > /dev/null
-   echo "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://apt.grafana.com stable main" | sudo tee /etc/apt/sources.list.d/grafana.list
-   ```
-
-1. Update the repositories:
-
-   ```shell
-   sudo apt-get update
-   ```
-
-1. Install Grafana Agent:
-
-   ```shell
-   sudo apt-get install grafana-agent
-   ```
-
-### Uninstall on Debian or Ubuntu
-
-To uninstall Grafana Agent on Debian or Ubuntu, run the following commands in a terminal window.
-
-1. Stop the systemd service for Grafana Agent:
-
-   ```shell
-   sudo systemctl stop grafana-agent
-   ```
-
-1. Uninstall Grafana Agent:
-
-   ```shell
-   sudo apt-get remove grafana-agent
-   ```
-
-1. Optional: Remove the Grafana repository:
-
-   ```shell
-   sudo rm -i /etc/apt/sources.list.d/grafana.list
-   ```
-
-## Install on RHEL or Fedora
-
-To install Grafana Agent in static mode on RHEL or Fedora, run the following commands in a terminal window.
-
-1. Import the GPG key:
-
-   ```shell
-   wget -q -O gpg.key https://rpm.grafana.com/gpg.key
-   sudo rpm --import gpg.key
-   ```
-
-1. Create `/etc/yum.repos.d/grafana.repo` with the following content:
-
-   ```shell
-   [grafana]
-   name=grafana
-   baseurl=https://rpm.grafana.com
-   repo_gpgcheck=1
-   enabled=1
-   gpgcheck=1
-   gpgkey=https://rpm.grafana.com/gpg.key
-   sslverify=1
-   sslcacert=/etc/pki/tls/certs/ca-bundle.crt
-   ```
-
-1. Optional: Verify the Grafana repository configuration:
-
-   ```shell
-   cat /etc/yum.repos.d/grafana.repo
-   ```
-
-1. Install Grafana Agent:
-
-   ```shell
-   sudo dnf install grafana-agent
-   ```
-
-### Uninstall on RHEL or Fedora
-
-To uninstall Grafana Agent on RHEL or Fedora, run the following commands in a terminal window:
-
-1. Stop the systemd service for Grafana Agent:
-
-   ```shell
-   sudo systemctl stop grafana-agent
-   ```
-
-1. Uninstall Grafana Agent:
-
-   ```shell
-   sudo dnf remove grafana-agent
-   ```
-
-1. Optional: Remove the Grafana repository:
-
-   ```shell
-   sudo rm -i /etc/yum.repos.d/grafana.repo
-   ```
-
-## Install on SUSE or openSUSE
-
-To install Grafana Agent in static mode on SUSE or openSUSE, run the following commands in a terminal window.
-
-1. 
Import the GPG key and add the Grafana package repository:
-
-   ```shell
-   wget -q -O gpg.key https://apt.grafana.com/gpg.key
-   sudo rpm --import gpg.key
-   sudo zypper addrepo https://rpm.grafana.com grafana
-   ```
-
-1. Update the repositories:
-
-   ```shell
-   sudo zypper refresh
-   ```
-
-1. Install Grafana Agent:
-
-   ```shell
-   sudo zypper install grafana-agent
-   ```
-
-### Uninstall on SUSE or openSUSE
-
-To uninstall Grafana Agent on SUSE or openSUSE, run the following commands in a terminal:
-
-1. Stop the systemd service for Grafana Agent:
-
-   ```shell
-   sudo systemctl stop grafana-agent
-   ```
-
-1. Uninstall Grafana Agent:
-
-   ```shell
-   sudo zypper remove grafana-agent
-   ```
-
-1. Optional: Remove the Grafana repository:
-
-   ```shell
-   sudo zypper removerepo grafana
-   ```
-
-## Operation guide
-
-Grafana Agent is configured as a [systemd](https://systemd.io/) service.
-
-### Start Grafana Agent
-
-To start Grafana Agent, run the following command in a terminal:
-
-   ```shell
-   sudo systemctl start grafana-agent
-   ```
-
-To check the status of Grafana Agent, run the following command in a terminal:
-
-   ```shell
-   sudo systemctl status grafana-agent
-   ```
-
-### Run Grafana Agent on startup
-
-To automatically run Grafana Agent when the system starts, run the following command in a terminal:
-
-   ```shell
-   sudo systemctl enable grafana-agent.service
-   ```
-
-### Configure Grafana Agent
-
-To configure Grafana Agent when installed on Linux, perform the following steps:
-
-1. Edit the default configuration file at `/etc/grafana-agent.yaml`.
-
-1. Run the following command in a terminal to reload the configuration file:
-
-   ```shell
-   sudo systemctl reload grafana-agent
-   ```
-
-### View Grafana Agent logs
-
-To view Grafana Agent logs, run the following command in a terminal:
-
-   ```shell
-   sudo journalctl -u grafana-agent
-   ```
-
-## Next steps
-
-- [Start Grafana Agent][start]
-- [Configure Grafana Agent][configure]
-
-{{% docs/reference %}}
-[start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent"
-[start]: "/docs/grafana-cloud/ -> ../start-agent"
-[configure]: "/docs/agent/ -> /docs/agent//static/configuration/create-config-file"
-[configure]: "/docs/grafana-cloud/ -> ../../configuration/create-config-file"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/set-up/install/install-agent-macos.md b/docs/sources/static/set-up/install/install-agent-macos.md
deleted file mode 100644
index c23bd59ec5..0000000000
--- a/docs/sources/static/set-up/install/install-agent-macos.md
+++ /dev/null
@@ -1,93 +0,0 @@
----
-aliases:
-- ../../set-up/install-agent-macos/
-- ../set-up/install-agent-macos/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-macos/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-macos/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-macos/
-description: Learn how to install Grafana Agent in static mode on macOS
-menuTitle: macOS
-title: Install Grafana Agent in static mode on macOS
-weight: 500
----
-
-# Install Grafana Agent in static mode on macOS
-
-You can install Grafana Agent in static mode on macOS with Homebrew.
-
-## Before you begin
-
-Install [Homebrew][] on your computer.
-
-{{< admonition type="note" >}}
-The default prefix for Homebrew on Intel is `/usr/local`. The default prefix for Homebrew on Apple Silicon is `/opt/homebrew`. To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`.
-{{< /admonition >}}
-
-[Homebrew]: https://brew.sh
-
-## Install
-
-To install Grafana Agent on macOS, run the following commands in a terminal window.
-
-1. Update Homebrew:
-
-   ```shell
-   brew update
-   ```
-
-1. Install Grafana Agent:
-
-   ```shell
-   brew install grafana-agent
-   ```
-
-Grafana Agent is installed by default at `$(brew --prefix)/Cellar/grafana-agent/VERSION`.
-
-## Upgrade
-
-To upgrade Grafana Agent on macOS, run the following commands in a terminal window.
-
-1. Upgrade Grafana Agent:
-
-   ```shell
-   brew upgrade grafana-agent
-   ```
-
-1. Restart Grafana Agent:
-
-   ```shell
-   brew services restart grafana-agent
-   ```
-
-## Uninstall
-
-To uninstall Grafana Agent on macOS, run the following command in a terminal window:
-
-```shell
-brew uninstall grafana-agent
-```
-
-## Configure
-
-1. To create the Agent `config.yml` file, open a terminal and run the following command:
-
-   ```shell
-   touch $(brew --prefix)/etc/grafana-agent/config.yml
-   ```
-
-1. Edit `$(brew --prefix)/etc/grafana-agent/config.yml` and add the configuration blocks for your specific telemetry needs. Refer to [Configure Grafana Agent][configure] for more information.
-
-{{< admonition type="note" >}}
-To send your data to Grafana Cloud, set up Grafana Agent using the Grafana Cloud integration. Refer to [how to install an integration](/docs/grafana-cloud/data-configuration/integrations/install-and-manage-integrations/) and [macOS integration](/docs/grafana-cloud/data-configuration/integrations/integration-reference/integration-macos-node/).
-{{< /admonition >}}
-
-## Next steps
-
-- [Start Grafana Agent][start]
-- [Configure Grafana Agent][configure]
-
-{{% docs/reference %}}
-[start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent"
-[start]: "/docs/grafana-cloud/ -> ../start-agent"
-[configure]: "/docs/agent/ -> /docs/agent//static/configuration/create-config-file"
-[configure]: "/docs/grafana-cloud/ -> ../../configuration/create-config-file"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/set-up/install/install-agent-on-windows.md b/docs/sources/static/set-up/install/install-agent-on-windows.md
deleted file mode 100644
index ddda581a53..0000000000
--- a/docs/sources/static/set-up/install/install-agent-on-windows.md
+++ /dev/null
@@ -1,177 +0,0 @@
----
-aliases:
-- ../../set-up/install-agent-on-windows/
-- ../set-up/install-agent-on-windows/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/install/install-agent-on-windows/
-- /docs/grafana-cloud/send-data/agent/static/set-up/install/install-agent-on-windows/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/install/install-agent-on-windows/
-description: Learn how to install Grafana Agent in static mode on Windows
-menuTitle: Windows
-title: Install Grafana Agent in static mode on Windows
-weight: 600
----
-
-# Install Grafana Agent in static mode on Windows
-
-You can install Grafana Agent in static mode on Windows as a standard graphical install, or as a silent install.
-
-## Standard install
-
-To do a standard graphical install of Grafana Agent on Windows, perform the following steps.
-
-1. Navigate to the [latest release](https://github.com/grafana/agent/releases) on GitHub.
-
-1. Scroll down to the **Assets** section.
-
-1. Download the file called `grafana-agent-installer.exe.zip`.
-
-1. Unzip the downloaded file.
-
-1. Double-click on `grafana-agent-installer.exe` to install Grafana Agent.
-
-   Grafana Agent is installed into the default directory `C:\Program Files\Grafana Agent`.
-
-   The following options are available:
-
-   - The [windows_exporter integration][windows_exporter_config] can be enabled with all default windows_exporter options.
-   - The [-config.expand-env][flags] command line flag can be enabled.
-
-## Silent install
-
-To do a silent install of Grafana Agent on Windows, perform the following steps.
-
-1. Navigate to the [latest release](https://github.com/grafana/agent/releases) on GitHub.
-
-1. Scroll down to the **Assets** section.
-
-1. Download the file called `grafana-agent-installer.exe.zip`.
-
-1. Unzip the downloaded file.
-
-1. Run the following command in PowerShell or Command Prompt:
-
-   ```shell
-   PATH_TO_INSTALLER/grafana-agent-installer.exe /S
-   ```
-
-   Replace `PATH_TO_INSTALLER` with the path where the unzipped installer executable is located.
-
-## Silent install with `remote_write`
-
-If you are using `remote_write`, you must enable the Windows Exporter and set the global `remote_write` configuration.
-
-1. Navigate to the [latest release](https://github.com/grafana/agent/releases) on GitHub.
-
-1. Scroll down to the **Assets** section.
-
-1. Download the file called `grafana-agent-installer.exe.zip`.
-
-1. Unzip the downloaded file.
-
-1. Run the following command in PowerShell or Command Prompt:
-
-   ```shell
-   PATH_TO_INSTALLER/grafana-agent-installer.exe /S /EnableExporter true /Username USERNAME /Password PASSWORD /Url "http://example.com"
-   ```
-
-   Replace the following:
-
-   - `PATH_TO_INSTALLER`: The path where the unzipped installer executable is located.
-   - `USERNAME`: Your username.
-   - `PASSWORD`: Your password.
-
-   If you are using PowerShell, make sure you use triple quotes `"""http://example.com"""` around the URL parameter.
-
-## Silent install with `-config.expand-env`
-
-You can enable [-config.expand-env][flags] during a silent install.
-
-1. Navigate to the [latest release](https://github.com/grafana/agent/releases) on GitHub.
-
-1. Scroll down to the **Assets** section.
-
-1. Download the file called `grafana-agent-installer.exe.zip`.
-
-1. Unzip the downloaded file.
-
-1. Run the following command in PowerShell or Command Prompt:
-
-   ```shell
-   PATH_TO_INSTALLER/grafana-agent-installer.exe /S /ExpandEnv true
-   ```
-
-## Verify the installation
-
-1. Make sure you can access `http://localhost:12345/-/healthy` and `http://localhost:12345/agent/api/v1/metrics/targets`.
-
-1. Optional: You can adjust `C:\Program Files\Grafana Agent\agent-config.yaml` to meet your specific needs. After changing the configuration file, restart the Grafana Agent service to load changes to the configuration.
-
-Existing configuration files are kept when you reinstall or upgrade Grafana Agent.
-
-## Security
-
-A configuration file for Grafana Agent is provided by default at `C:\Program Files\Grafana Agent`. Depending on your configuration, you can modify the default permissions of the file or move it to another directory.
-
-If you change the location of the configuration file, perform the following steps:
-
-1. Update the Grafana Agent service to load the new path.
-
-1. Run the following with Administrator privileges in PowerShell or Command Prompt:
-
-   ```shell
-   sc config "Grafana Agent" binpath= "INSTALLED_DIRECTORY\agent-windows-amd64.exe -config.file=\"PATH_TO_CONFIG\agent-config.yaml\""
-   ```
-
-   Replace the following:
-
-   - `INSTALLED_DIRECTORY`: The directory where Grafana Agent is installed, such as `C:\Program Files\Grafana Agent`.
-   - `PATH_TO_CONFIG`: The full path to your Grafana Agent configuration file.
-
-## Uninstall Grafana Agent
-
-You can uninstall Grafana Agent with Windows Remove Programs or `C:\Program Files\Grafana Agent\uninstaller.exe`.
-
-Uninstalling Grafana Agent stops the service and removes it from disk. This includes any configuration files in the installation directory.
-
-You can also silently uninstall Grafana Agent by running `uninstaller.exe /S` as Administrator.
-
-## Push Windows logs to Grafana Loki
-
-Grafana Agent can use the embedded [promtail](/docs/loki/latest/clients/promtail/) to push Windows Event Logs to [Grafana Loki](https://github.com/grafana/loki), as in the following example configuration:
-
-```yaml
-server:
-  log_level: debug
-logs:
-  # Choose a directory where the last read position of log files is saved.
-  # This directory is created if it doesn't already exist.
-  positions_directory: "C:\\path\\to\\directory"
-  configs:
-    - name: windows
-      # Loki endpoint to push logs to
-      clients:
-        - url: https://example.com
-      scrape_configs:
-      - job_name: windows
-        windows_events:
-          # The directory structure must already exist, but the bookmark file is created on demand.
-          bookmark_path: "C:\\path\\to\\bookmark\\directory\\bookmark.xml"
-          use_incoming_timestamp: false
-          eventlog_name: "Application"
-          # Filter for logs
-          xpath_query: '*'
-          labels:
-            job: windows
-```
-
-Refer to [windows_events](/docs/loki/latest/clients/promtail/configuration/#windows_events) for additional configuration details.
-
-## Next steps
-
-- [Start Grafana Agent][start]
-- [Configure Grafana Agent][configure]
-
-{{% docs/reference %}}
-[flags]: "/docs/agent/ -> /docs/agent//static/configuration/flags"
-[windows_exporter_config]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/windows-exporter-config"
-[start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent"
-[start]: "/docs/grafana-cloud/ -> ../start-agent"
-[configure]: "/docs/agent/ -> /docs/agent//static/configuration/create-config-file"
-[configure]: "/docs/grafana-cloud/ -> ../../configuration/create-config-file"
-{{% /docs/reference %}}
diff --git a/docs/sources/static/set-up/quick-starts.md b/docs/sources/static/set-up/quick-starts.md
deleted file mode 100644
index 848630ab5b..0000000000
--- a/docs/sources/static/set-up/quick-starts.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-aliases:
-- ../../set-up/quick-starts/
-- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/quick-starts/
-- /docs/grafana-cloud/send-data/agent/static/set-up/quick-starts/
-canonical: https://grafana.com/docs/agent/latest/static/set-up/quick-starts/
-description: Learn how to get started with Grafana Agent in static mode
-menuTitle: Get started
-title: Grafana Agent quick starts
-weight: 300
----
-
-# Grafana Agent quick starts
-
-The following quick starts help you get up and running with Grafana Agent. You’ll learn how to send your metrics, logs, and traces to the Grafana Stack or Grafana Cloud.
-
-## Grafana Stack quick starts
-
-- [Send metrics to Mimir](/docs/mimir/latest/get-started/) using Grafana Agent.
-
-- [Send traces to Tempo](/docs/tempo/latest/getting-started/#2-pipeline-grafana-agent) using Grafana Agent.
-
-- [Send logs to Loki](/docs/grafana-cloud/logs/collect-logs-with-agent/) using Grafana Agent.
-
-## Grafana Cloud quick starts
-
-- [Grafana Agent for Grafana Cloud](/docs/grafana-cloud/monitor-infrastructure/integrations/get-started/).
-- [Monitoring a Linux host](/docs/grafana-cloud/quickstart/agent_linuxnode/) using the Linux Node integration.
-- [Grafana Agent Kubernetes configuration](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/).
diff --git a/docs/sources/static/set-up/start-agent.md b/docs/sources/static/set-up/start-agent.md deleted file mode 100644 index dfbb9b0117..0000000000 --- a/docs/sources/static/set-up/start-agent.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/static/set-up/start-agent/ -- /docs/grafana-cloud/send-data/agent/static/set-up/start-agent/ -canonical: https://grafana.com/docs/agent/latest/static/set-up/start-agent/ -description: Learn how to start, restart, and stop Grafana Agent in static mode -menuTitle: Start static mode -title: Start, restart, and stop Grafana Agent in static mode -weight: 200 ---- - -# Start, restart, and stop Grafana Agent in static mode - -You can start, restart, and stop Grafana Agent after it is installed. - -## Linux - -Grafana Agent is installed as a [systemd][] service on Linux. - -[systemd]: https://systemd.io/ - -### Start Grafana Agent - -To start Grafana Agent, run the following command in a terminal window: - -```shell -sudo systemctl start grafana-agent -``` - -(Optional) Verify that the service is running: - -```shell -sudo systemctl status grafana-agent -``` - -### Configure Grafana Agent to start at boot - -To automatically run Grafana Agent when the system starts, run the following command in a terminal window: - -```shell -sudo systemctl enable grafana-agent.service -``` - -### Restart Grafana Agent - -To restart Grafana Agent, run the following command in a terminal window: - -```shell -sudo systemctl restart grafana-agent -``` - -### Stop Grafana Agent - -To stop Grafana Agent, run the following command in a terminal window: - -```shell -sudo systemctl stop grafana-agent -``` - -### View Grafana Agent logs on Linux - -To view the Grafana Agent log files, run the following command in a terminal window: - -```shell -sudo journalctl -u grafana-agent -``` - -## macOS - -Grafana Agent is installed as a launchd service on macOS. - -### Start Grafana Agent - -To start Grafana Agent, run the following command in a terminal window: - -```shell -brew services start grafana-agent -``` - -Grafana Agent automatically runs when the system starts. - -Optional: Verify that the service is running: - -```shell -brew services info grafana-agent -``` - -### Restart Grafana Agent - -To restart Grafana Agent, run the following command in a terminal window: - -```shell -brew services restart grafana-agent -``` - -### Stop Grafana Agent - -To stop Grafana Agent, run the following command in a terminal window: - -```shell -brew services stop grafana-agent -``` - -### View Grafana Agent logs on macOS - -By default, logs are written to `$(brew --prefix)/var/log/grafana-agent.log` and -`$(brew --prefix)/var/log/grafana-agent.err.log`. - -If you followed [Configure][configure] steps in the macOS install instructions and changed the path where logs are written, refer to your current copy of the Grafana Agent formula to locate your log files. - -## Windows - -Grafana Agent is installed as a Windows Service. The service is configured to automatically run on startup. - -To verify that Grafana Agent is running as a Windows Service: - -1. Open the Windows Services manager (services.msc): - - 1. Right click on the Start Menu and select **Run**. - - 1. Type: `services.msc` and click **OK**. - -1. Scroll down to find the **Grafana Agent** service and verify that the **Status** is **Running**. 
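-
-You can also check the service state from a terminal instead of the graphical Services manager. A small sketch, using standard Windows tooling and the service name shown above:
-
-```shell
-# PowerShell: show the current state of the service.
-Get-Service "Grafana Agent"
-
-# Command Prompt: query the service through the service controller.
-sc query "Grafana Agent"
-```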
- -### View Grafana Agent logs - -When running on Windows, Grafana Agent writes its logs to Windows Event -Logs with an event source name of **Grafana Agent**. - -To view the logs, perform the following steps: - -1. Open the Event Viewer: - - 1. Right click on the Start Menu and select **Run**. - - 1. Type `eventvwr` and click **OK**. - -1. In the Event Viewer, click on **Windows Logs > Application**. - -1. Search for events with the source **Grafana Agent**. - -## Standalone binary - -If you downloaded the standalone binary, you must run the agent from a terminal or command window. - -### Start Grafana Agent on Linux, macOS, FreeBSD, or Windows - -To start Grafana Agent on Linux, macOS, FreeBSD, or Windows run the following command in a terminal window or command prompt: - -```shell -BINARY_PATH -config.file CONFIG_FILE -``` - -Replace the following: - -* `BINARY_PATH`: The path to the Grafana Agent binary file -* `CONFIG_FILE`: The path to the Grafana Agent configuration file. - -{{% docs/reference %}} -[configure]: "/docs/agent/ -> /docs/agent//static/set-up/install/install-agent-macos#configure" -[configure]: "/docs/grafana-cloud/ -> ./install/install-agent-macos/#configure" -{{% /docs/reference %}} diff --git a/docs/sources/tasks/_index.md b/docs/sources/tasks/_index.md new file mode 100644 index 0000000000..da85a2fe2e --- /dev/null +++ b/docs/sources/tasks/_index.md @@ -0,0 +1,13 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tasks/ +description: How to perform common tasks with Grafana Alloy +menuTitle: Tasks +title: Tasks with Grafana Alloy +weight: 200 +--- + +# Tasks with {{% param "PRODUCT_NAME" %}} + +This section details how to perform common tasks with {{< param "PRODUCT_NAME" >}}. + +{{< section >}} diff --git a/docs/sources/flow/tasks/collect-opentelemetry-data.md b/docs/sources/tasks/collect-opentelemetry-data.md similarity index 81% rename from docs/sources/flow/tasks/collect-opentelemetry-data.md rename to docs/sources/tasks/collect-opentelemetry-data.md index 22248f9f70..935639e96a 100644 --- a/docs/sources/flow/tasks/collect-opentelemetry-data.md +++ b/docs/sources/tasks/collect-opentelemetry-data.md @@ -1,16 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/collect-opentelemetry-data/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/collect-opentelemetry-data/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/collect-opentelemetry-data/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/collect-opentelemetry-data/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/getting-started/collect-opentelemetry-data/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/collect-opentelemetry-data/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/collect-opentelemetry-data/ -- /docs/grafana-cloud/send-data/agent/flow/getting-started/collect-opentelemetry-data/ -- ../getting-started/collect-opentelemetry-data/ # /docs/agent/latest/flow/getting-started/collect-opentelemetry-data/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/collect-opentelemetry-data/ +canonical: https://grafana.com/docs/alloy/latest/tasks/collect-opentelemetry-data/ description: Learn how to collect OpenTelemetry data title: Collect OpenTelemetry data weight: 300 @@ -18,8 +7,7 @@ weight: 300 # Collect OpenTelemetry data -{{< param "PRODUCT_NAME" >}} can be configured to collect [OpenTelemetry][]-compatible -data and forward it to any 
OpenTelemetry-compatible endpoint. +{{< param "PRODUCT_NAME" >}} can be configured to collect [OpenTelemetry][]-compatible data and forward it to any OpenTelemetry-compatible endpoint. This topic describes how to: @@ -318,18 +306,9 @@ For more information on receiving OpenTelemetry data using the OpenTelemetry Pro [OpenTelemetry]: https://opentelemetry.io [Configure an OpenTelemetry Protocol exporter]: #configure-an-opentelemetry-protocol-exporter [Configure batching]: #configure-batching - -{{% docs/reference %}} -[otelcol.auth.basic]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.auth.basic.md" -[otelcol.auth.basic]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.basic.md" -[otelcol.exporter.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.otlp.md" -[otelcol.exporter.otlp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlp.md" -[otelcol.exporter.otlphttp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.otlphttp.md" -[otelcol.exporter.otlphttp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlphttp.md" -[otelcol.processor.batch]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.processor.batch.md" -[otelcol.processor.batch]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.batch.md" -[otelcol.receiver.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.receiver.otlp.md" -[otelcol.receiver.otlp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.otlp.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" -{{% /docs/reference %}} +[otelcol.auth.basic]: ../../reference/components/otelcol.auth.basic/ +[otelcol.exporter.otlp]: ../../reference/components/otelcol.exporter.otlp/ +[otelcol.exporter.otlphttp]: ../../reference/components/otelcol.exporter.otlphttp/ +[otelcol.processor.batch]: ../../reference/components/otelcol.processor.batch/ +[otelcol.receiver.otlp]: ../../reference/components/otelcol.receiver.otlp/ +[Components]: ../../concepts/components/ diff --git a/docs/sources/flow/tasks/collect-prometheus-metrics.md b/docs/sources/tasks/collect-prometheus-metrics.md similarity index 87% rename from docs/sources/flow/tasks/collect-prometheus-metrics.md rename to docs/sources/tasks/collect-prometheus-metrics.md index 350ce1ccfd..a317f57780 100644 --- a/docs/sources/flow/tasks/collect-prometheus-metrics.md +++ b/docs/sources/tasks/collect-prometheus-metrics.md @@ -1,16 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/collect-prometheus-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/collect-prometheus-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/collect-prometheus-metrics/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/collect-prometheus-metrics/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/getting-started/collect-prometheus-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/collect-prometheus-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/collect-prometheus-metrics/ -- 
/docs/grafana-cloud/send-data/agent/flow/getting-started/collect-prometheus-metrics/ -- ../getting-started/collect-prometheus-metrics/ # /docs/agent/latest/flow/getting-started/collect-prometheus-metrics/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/collect-prometheus-metrics/ +canonical: https://grafana.com/docs/alloy/latest/tasks/collect-prometheus-metrics/ description: Learn how to collect and forward Prometheus metrics title: Collect and forward Prometheus metrics weight: 200 @@ -436,16 +425,8 @@ prometheus.remote_write "default" { [Field Selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ [Labels and Selectors]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement [Configure metrics delivery]: #configure-metrics-delivery - -{{% docs/reference %}} -[discovery.kubernetes]: "/docs/agent/ -> /docs/agent//flow/reference/components/discovery.kubernetes.md" -[discovery.kubernetes]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.kubernetes.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write.md" -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" -[Objects]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values.md#objects" -[Objects]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values.md#objects" -{{% /docs/reference %}} +[discovery.kubernetes]: ../../reference/components/discovery.kubernetes/ +[prometheus.remote_write]: ../../reference/components/prometheus.remote_write/ +[prometheus.scrape]: ../../reference/components/prometheus.scrape/ +[Components]: ../../concepts/components/ +[Objects]: ../../concepts/config-language/expressions/types_and_values/#objects diff --git a/docs/sources/tasks/configure-agent-clustering.md b/docs/sources/tasks/configure-agent-clustering.md new file mode 100644 index 0000000000..024f8a5392 --- /dev/null +++ b/docs/sources/tasks/configure-agent-clustering.md @@ -0,0 +1,60 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tasks/configure-agent-clustering/ +description: Learn how to configure Grafana Alloy clustering in an existing installation +menuTitle: Configure clustering +title: Configure Grafana Alloy clustering in an existing installation +weight: 400 +--- + +# Configure {{% param "PRODUCT_NAME" %}} clustering in an existing installation + +You can configure {{< param "PRODUCT_NAME" >}} to run with [clustering][] so that individual {{< param "PRODUCT_ROOT_NAME" >}}s can work together for workload distribution and high availability. + +{{< admonition type="note" >}} +Clustering is a [beta][] feature. Beta features are subject to breaking changes and may be replaced with equivalent functionality that covers the same use case. + +[beta]: ../../stability/#beta +{{< /admonition >}} + +This topic describes how to add clustering to an existing installation. 
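+
+If you run {{< param "PRODUCT_ROOT_NAME" >}}s outside Kubernetes, you can enable clustering with flags on the `run` command instead. The following is a minimal sketch, assuming the binary is still invoked as `grafana-agent` and that `first-node.example.com` is a placeholder for a peer that is already running:
+
+```bash
+# First node: enable clustering.
+grafana-agent run config.river --cluster.enabled=true
+
+# Additional nodes: enable clustering and join the first node's HTTP address.
+grafana-agent run config.river --cluster.enabled=true \
+  --cluster.join-addresses=first-node.example.com:12345
+```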
+
+## Configure {{% param "PRODUCT_NAME" %}} clustering with Helm Chart
+
+This section guides you through enabling clustering when {{< param "PRODUCT_NAME" >}} is installed on Kubernetes using the {{< param "PRODUCT_ROOT_NAME" >}} [Helm chart][install-helm].
+
+### Before you begin
+
+- Ensure that your `values.yaml` file has `controller.type` set to `statefulset`.
+
+### Steps
+
+To configure clustering:
+
+1. Amend your existing `values.yaml` file to add `clustering.enabled=true` inside the `agent` block.
+
+   ```yaml
+   agent:
+     clustering:
+       enabled: true
+   ```
+
+1. Upgrade your installation to use the new `values.yaml` file:
+
+   ```bash
+   helm upgrade <RELEASE_NAME> grafana/grafana-agent -f values.yaml
+   ```
+
+   Replace the following:
+
+   - _`<RELEASE_NAME>`_: The name of the installation you chose when you installed the Helm chart.
+
+1. Use the {{< param "PRODUCT_NAME" >}} [UI][] to verify the cluster status:
+
+   1. Click **Clustering** in the navigation bar.
+
+   1. Ensure that all expected nodes appear in the resulting table.
+
+[clustering]: ../../concepts/clustering/
+[beta]: ../../stability/#beta
+[install-helm]: ../../get-started/install/kubernetes/
+[UI]: ../debug/#component-detail-page
diff --git a/docs/sources/tasks/configure/_index.md b/docs/sources/tasks/configure/_index.md
new file mode 100644
index 0000000000..b8bff7751a
--- /dev/null
+++ b/docs/sources/tasks/configure/_index.md
@@ -0,0 +1,22 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/configure/
+description: Configure Grafana Alloy after it is installed
+menuTitle: Configure
+title: Configure Grafana Alloy
+weight: 90
+---
+
+# Configure {{% param "PRODUCT_NAME" %}}
+
+You can configure {{< param "PRODUCT_NAME" >}} after it is [installed][Install].
+The default River configuration file for {{< param "PRODUCT_NAME" >}} is located at:
+
+* Linux: `/etc/grafana-agent-flow.river`
+* macOS: `$(brew --prefix)/etc/grafana-agent-flow/config.river`
+* Windows: `C:\Program Files\Grafana Agent Flow\config.river`
+
+This section includes information that helps you configure {{< param "PRODUCT_NAME" >}}.
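+
+For orientation, a River configuration is a set of components wired together into a pipeline. The following is a minimal sketch; the scrape target and the remote write URL are placeholders:
+
+```river
+// Scrape this host's own metrics endpoint and forward the samples
+// to a Prometheus-compatible remote_write endpoint.
+prometheus.scrape "self" {
+  targets    = [{"__address__" = "127.0.0.1:12345"}]
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+
+prometheus.remote_write "default" {
+  endpoint {
+    url = "https://prometheus.example.com/api/v1/write"
+  }
+}
+```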
+ +{{< section >}} + +[Install]: ../../get-started/install/ diff --git a/docs/sources/flow/tasks/configure/configure-kubernetes.md b/docs/sources/tasks/configure/configure-kubernetes.md similarity index 55% rename from docs/sources/flow/tasks/configure/configure-kubernetes.md rename to docs/sources/tasks/configure/configure-kubernetes.md index 2941f68a42..822102f80f 100644 --- a/docs/sources/flow/tasks/configure/configure-kubernetes.md +++ b/docs/sources/tasks/configure/configure-kubernetes.md @@ -1,19 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/configure/configure-kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-kubernetes/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/configure/configure-kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-kubernetes/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-kubernetes/ -- /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-kubernetes/ -- ../../setup/configure/configure-kubernetes/ # /docs/agent/latest/flow/setup/configure/configure-kubernetes/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-kubernetes/ -description: Learn how to configure Grafana Agent Flow on Kubernetes +canonical: https://grafana.com/docs/alloy/latest/tasks/configure/configure-kubernetes/ +description: Learn how to configure Grafana Alloy on Kubernetes menuTitle: Kubernetes -title: Configure Grafana Agent Flow on Kubernetes +title: Configure Grafana Alloy on Kubernetes weight: 200 --- @@ -23,8 +12,7 @@ To configure {{< param "PRODUCT_NAME" >}} on Kubernetes, perform the following s 1. Download a local copy of [values.yaml][] for the Helm chart. -1. Make changes to your copy of `values.yaml` to customize settings for the - Helm chart. +1. Make changes to your copy of `values.yaml` to customize settings for the Helm chart. Refer to the inline documentation in the `values.yaml` for more information about each option. @@ -38,14 +26,10 @@ To configure {{< param "PRODUCT_NAME" >}} on Kubernetes, perform the following s 1. Replace `VALUES_PATH` with the path to your copy of `values.yaml` to use. -[values.yaml]: https://raw.githubusercontent.com/grafana/agent/main/operations/helm/charts/grafana-agent/values.yaml - ## Kustomize considerations -If you are using [Kustomize][] to inflate and install the [Helm chart][], be careful -when using a `configMapGenerator` to generate the ConfigMap containing the -configuration. By default, the generator appends a hash to the name and patches -the resource mentioning it, triggering a rolling update. +If you are using [Kustomize][] to inflate and install the [Helm chart][], be careful when using a `configMapGenerator` to generate the ConfigMap containing the configuration. +By default, the generator appends a hash to the name and patches the resource mentioning it, triggering a rolling update. This behavior is undesirable for {{< param "PRODUCT_NAME" >}} because the startup time can be significant depending on the size of the Write-Ahead Log. You can use the [Helm chart][] sidecar container to watch the ConfigMap and trigger a dynamic reload. 
@@ -60,6 +44,6 @@ configMapGenerator: options: disableNameSuffixHash: true ``` - +[values.yaml]: https://raw.githubusercontent.com/grafana/agent/main/operations/helm/charts/grafana-agent/values.yaml [Helm chart]: https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent [Kustomize]: https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/ diff --git a/docs/sources/flow/tasks/configure/configure-linux.md b/docs/sources/tasks/configure/configure-linux.md similarity index 56% rename from docs/sources/flow/tasks/configure/configure-linux.md rename to docs/sources/tasks/configure/configure-linux.md index 4b0bd3344e..b0757e9338 100644 --- a/docs/sources/flow/tasks/configure/configure-linux.md +++ b/docs/sources/tasks/configure/configure-linux.md @@ -1,19 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/configure/configure-linux/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-linux/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-linux/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/configure/configure-linux/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-linux/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-linux/ -- /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-linux/ -- ../../setup/configure/configure-linux/ # /docs/agent/latest/flow/setup/configure/configure-linux/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-linux/ -description: Learn how to configure Grafana Agent Flow on Linux +canonical: https://grafana.com/docs/alloy/latest/tasks/configure/configure-linux/ +description: Learn how to configure Grafana Alloy on Linux menuTitle: Linux -title: Configure Grafana Agent Flow on Linux +title: Configure Grafana Alloy on Linux weight: 300 --- @@ -36,8 +25,7 @@ To change the configuration file used by the service, perform the following step * Debian or Ubuntu: edit `/etc/default/grafana-agent-flow` * RHEL/Fedora or SUSE/openSUSE: edit `/etc/sysconfig/grafana-agent-flow` -1. Change the contents of the `CONFIG_FILE` environment variable to point to - the new configuration file to use. +1. Change the contents of the `CONFIG_FILE` environment variable to point at the new configuration file to use. 1. Restart the {{< param "PRODUCT_NAME" >}} service: @@ -47,13 +35,11 @@ To change the configuration file used by the service, perform the following step ## Pass additional command-line flags -By default, the {{< param "PRODUCT_NAME" >}} service launches with the [run][] -command, passing the following flags: +By default, the {{< param "PRODUCT_NAME" >}} service launches with the [run][] command, passing the following flags: * `--storage.path=/var/lib/grafana-agent-flow` -To pass additional command-line flags to the {{< param "PRODUCT_NAME" >}} binary, perform -the following steps: +To pass additional command-line flags to the {{< param "PRODUCT_NAME" >}} binary, perform the following steps: 1. Edit the environment file for the service: @@ -69,14 +55,12 @@ the following steps: sudo systemctl restart grafana-agent-flow ``` -To see the list of valid command-line flags that can be passed to the service, -refer to the documentation for the [run][] command. 
+To see the list of valid command-line flags that can be passed to the service, refer to the documentation for the [run][] command. ## Expose the UI to other machines -By default, {{< param "PRODUCT_NAME" >}} listens on the local network for its HTTP -server. This prevents other machines on the network from being able to access -the [UI for debugging][UI]. +By default, {{< param "PRODUCT_NAME" >}} listens on the local network for its HTTP server. +This prevents other machines on the network from being able to access the [UI for debugging][UI]. To expose the UI to other machines, complete the following steps: @@ -96,9 +80,5 @@ To expose the UI to other machines, complete the following steps: To listen on all interfaces, replace `LISTEN_ADDR` with `0.0.0.0`. -{{% docs/reference %}} -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui" -{{% /docs/reference %}} +[run]:../../../reference/cli/run/ +[UI]: ../../debug/#grafana-alloy-ui diff --git a/docs/sources/flow/tasks/configure/configure-macos.md b/docs/sources/tasks/configure/configure-macos.md similarity index 58% rename from docs/sources/flow/tasks/configure/configure-macos.md rename to docs/sources/tasks/configure/configure-macos.md index 8b860a010d..d57885617e 100644 --- a/docs/sources/flow/tasks/configure/configure-macos.md +++ b/docs/sources/tasks/configure/configure-macos.md @@ -1,19 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/configure/configure-macos/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-macos/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-macos/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/configure/configure-macos/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-macos/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-macos/ -- /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-macos/ -- ../../setup/configure/configure-macos/ # /docs/agent/latest/flow/setup/configure/configure-macos/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-macos/ -description: Learn how to configure Grafana Agent Flow on macOS +canonical: https://grafana.com/docs/alloy/latest/tasks/configure/configure-macos/ +description: Learn how to configure Grafana Alloy on macOS menuTitle: macOS -title: Configure Grafana Agent Flow on macOS +title: Configure Grafana Alloy on macOS weight: 400 --- @@ -32,13 +21,10 @@ To configure {{< param "PRODUCT_NAME" >}} on macOS, perform the following steps: ## Configure the {{% param "PRODUCT_NAME" %}} service {{< admonition type="note" >}} -Due to limitations in Homebrew, customizing the service used by -{{< param "PRODUCT_NAME" >}} on macOS requires changing the Homebrew formula and -reinstalling {{< param "PRODUCT_NAME" >}}. +Due to limitations in Homebrew, customizing the service used by {{< param "PRODUCT_NAME" >}} on macOS requires changing the Homebrew formula and reinstalling {{< param "PRODUCT_NAME" >}}. 
{{< /admonition >}} -To customize the {{< param "PRODUCT_NAME" >}} service on macOS, perform the following -steps: +To customize the {{< param "PRODUCT_NAME" >}} service on macOS, perform the following steps: 1. Run the following command in a terminal: @@ -70,9 +56,8 @@ steps: ## Expose the UI to other machines -By default, {{< param "PRODUCT_NAME" >}} listens on the local network for its HTTP -server. This prevents other machines on the network from being able to access -the [UI for debugging][UI]. +By default, {{< param "PRODUCT_NAME" >}} listens on the local network for its HTTP server. +This prevents other machines on the network from being able to access the [UI for debugging][UI]. To expose the UI to other machines, complete the following steps: @@ -87,7 +72,4 @@ To expose the UI to other machines, complete the following steps: To listen on all interfaces, replace `127.0.0.1` with `0.0.0.0`. -{{% docs/reference %}} -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui" -{{% /docs/reference %}} +[UI]: ../../debug/#grafana-alloy-ui diff --git a/docs/sources/flow/tasks/configure/configure-windows.md b/docs/sources/tasks/configure/configure-windows.md similarity index 66% rename from docs/sources/flow/tasks/configure/configure-windows.md rename to docs/sources/tasks/configure/configure-windows.md index 806579ea13..93fc1a4f1a 100644 --- a/docs/sources/flow/tasks/configure/configure-windows.md +++ b/docs/sources/tasks/configure/configure-windows.md @@ -1,19 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/configure/configure-windows/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-windows/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-windows/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-windows/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/setup/configure/configure-windows/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-windows/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-windows/ -- /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-windows/ -- ../../setup/configure/configure-windows/ # /docs/agent/latest/flow/setup/configure/configure-windows/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-windows/ -description: Learn how to configure Grafana Agent Flow on Windows +canonical: https://grafana.com/docs/alloy/latest/tasks/configure/configure-windows/ +description: Learn how to configure Grafana Alloy on Windows menuTitle: Windows -title: Configure Grafana Agent Flow on Windows +title: Configure Grafana Alloy on Windows weight: 500 --- @@ -95,8 +84,4 @@ To expose the UI to other machines, complete the following steps: To listen on all interfaces, replace `LISTEN_ADDR` with `0.0.0.0`. 
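+
+To confirm that the UI is reachable after the change, you can probe the HTTP server from another machine. A small sketch, assuming the default port and using `LISTEN_ADDR` as a placeholder for the address you configured:
+
+```shell
+# Expect an HTTP 200 response once the service has restarted.
+curl http://LISTEN_ADDR:12345/-/ready
+```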
-{{% docs/reference %}}
-[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui"
-[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui"
-{{% /docs/reference %}}
-
+[UI]: ../../debug/#grafana-alloy-ui
diff --git a/docs/sources/flow/tasks/debug.md b/docs/sources/tasks/debug.md
similarity index 61%
rename from docs/sources/flow/tasks/debug.md
rename to docs/sources/tasks/debug.md
index 331307a58d..4f2615dc5c 100644
--- a/docs/sources/flow/tasks/debug.md
+++ b/docs/sources/tasks/debug.md
@@ -1,18 +1,7 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/debug/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/debug/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/debug/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/debug/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/monitoring/debugging/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/debugging/
-- /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging/
-- ../monitoring/debugging/ # /docs/agent/latest/flow/monitoring/debugging/
-canonical: https://grafana.com/docs/agent/latest/flow/monitoring/debugging/
-description: Learn about debugging issues with Grafana Agent Flow
-title: Debug issues with Grafana Agent Flow
+canonical: https://grafana.com/docs/alloy/latest/tasks/debug/
+description: Learn about debugging issues with Grafana Alloy
+title: Debug issues with Grafana Alloy
 menuTitle: Debug issues
 weight: 1000
 ---

@@ -28,14 +17,17 @@ Follow these steps to debug issues with {{< param "PRODUCT_NAME" >}}:

 {{< param "PRODUCT_NAME" >}} includes an embedded UI viewable from the {{< param "PRODUCT_ROOT_NAME" >}} HTTP server, which defaults to listening at `http://localhost:12345`.

-> **NOTE**: For security reasons, installations of {{< param "PRODUCT_NAME" >}} on non-containerized platforms default to listening on `localhost`.
-> This default prevents other machines on the network from being able to view the UI.
->
-> To expose the UI to other machines on the network on non-containerized platforms, refer to the documentation for how you [installed][install] {{< param "PRODUCT_NAME" >}}.
->
-> If you are running a custom installation of {{< param "PRODUCT_NAME" >}},
-> refer to the documentation for [the `grafana-agent run` command][grafana-agent run] to learn how to change the HTTP listen address,
-> and pass the appropriate flag when running {{< param "PRODUCT_NAME" >}}.
+{{< admonition type="note" >}}
+For security reasons, installations of {{< param "PRODUCT_NAME" >}} on non-containerized platforms default to listening on `localhost`.
+This default prevents other machines on the network from being able to view the UI.
+
+To expose the UI to other machines on the network on non-containerized platforms, refer to the documentation for how you [installed][install] {{< param "PRODUCT_NAME" >}}.
+
+If you are running a custom installation of {{< param "PRODUCT_NAME" >}}, refer to the documentation for the [`grafana-agent run` command][grafana-agent run] to learn how to change the HTTP listen address, and pass the appropriate flag when running {{< param "PRODUCT_NAME" >}}.
+
+[install]: ../../get-started/install/
+[grafana-agent run]: ../../reference/cli/run/
+{{< /admonition >}}

 ### Home page

@@ -113,16 +105,6 @@ To debug issues when using [clustering][], check for the following symptoms.

 - **Node stuck in terminating state**: The node attempted to gracefully shut down and set its state to Terminating, but it has not completely gone away.
   Check the clustering page to view the state of the peers and verify that the terminating {{< param "PRODUCT_ROOT_NAME" >}} has been shut down.

-{{% docs/reference %}}
-[logging]: "/docs/agent/ -> /docs/agent//flow/reference/config-blocks/logging.md"
-[logging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/logging.md"
-[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering.md"
-[clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering.md"
-[install]: "/docs/agent/ -> /docs/agent//flow/get-started/install"
-[install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install"
-[secret]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values.md#secrets.md"
-[secret]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values.md#secrets.md"
-[grafana-agent run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md"
-[grafana-agent run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md"
-{{% /docs/reference %}}
-
+[logging]: ../../reference/config-blocks/logging/
+[clustering]: ../../concepts/clustering/
+[secret]: ../../concepts/config-language/expressions/types_and_values/#secrets
diff --git a/docs/sources/tasks/distribute-prometheus-scrape-load.md b/docs/sources/tasks/distribute-prometheus-scrape-load.md
new file mode 100644
index 0000000000..9c7dbc41ee
--- /dev/null
+++ b/docs/sources/tasks/distribute-prometheus-scrape-load.md
@@ -0,0 +1,51 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/distribute-prometheus-scrape-load/
+description: Learn how to distribute your Prometheus metrics scrape load
+menuTitle: Distribute Prometheus metrics scrape load
+title: Distribute Prometheus metrics scrape load
+weight: 500
+---
+
+# Distribute Prometheus metrics scrape load
+
+A good predictor for the size of an {{< param "PRODUCT_NAME" >}} deployment is the number of Prometheus targets each {{< param "PRODUCT_ROOT_NAME" >}} scrapes.
+[Clustering][] with target auto-distribution allows a fleet of {{< param "PRODUCT_ROOT_NAME" >}}s to work together to dynamically distribute their scrape load, providing high availability.
+
+{{< admonition type="note" >}}
+Clustering is a [beta][] feature. Beta features are subject to breaking changes and may be replaced with equivalent functionality that covers the same use case.
+
+[beta]: ../../stability/#beta
+{{< /admonition >}}
+
+## Before you begin
+
+- Familiarize yourself with how to [configure][] existing {{< param "PRODUCT_NAME" >}} installations.
+- [Configure Prometheus metrics collection][].
+- [Configure clustering][].
+- Ensure that all of your clustered {{< param "PRODUCT_ROOT_NAME" >}}s have the same configuration file.
+
+## Steps
+
+To distribute Prometheus metrics scrape load with clustering:
+
+1. Add the following block to all `prometheus.scrape` components that should use auto-distribution:
+
+   ```river
+   clustering {
+     enabled = true
+   }
+   ```
+
+1. Restart or reload {{< param "PRODUCT_ROOT_NAME" >}}s for them to use the new configuration.
+
+1. Validate that auto-distribution is functioning:
+
+   1. Using the {{< param "PRODUCT_ROOT_NAME" >}} [UI][] on each {{< param "PRODUCT_ROOT_NAME" >}}, navigate to the details page for one of the `prometheus.scrape` components you modified.
+
+   1. Compare the Debug Info sections between two different {{< param "PRODUCT_ROOT_NAME" >}}s to ensure that they're not scraping the same sets of targets.
+
+[Clustering]: ../../concepts/clustering/
+[configure]: ../configure/
+[Configure Prometheus metrics collection]: ../collect-prometheus-metrics/
+[Configure clustering]: ../configure-agent-clustering/
+[UI]: ../debug/#component-detail-page
diff --git a/docs/sources/tasks/estimate-resource-usage.md b/docs/sources/tasks/estimate-resource-usage.md
new file mode 100644
index 0000000000..cac6956363
--- /dev/null
+++ b/docs/sources/tasks/estimate-resource-usage.md
@@ -0,0 +1,58 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/resource-usage/
+description: Estimate expected Grafana Alloy resource usage
+headless: true
+title: Estimate resource usage
+menuTitle: Estimate resource usage
+weight: 190
+---
+
+# Estimate {{% param "PRODUCT_NAME" %}} resource usage
+
+This page provides guidance for expected resource usage of {{< param "PRODUCT_NAME" >}} for each telemetry type, based on operational experience of some of the {{< param "PRODUCT_NAME" >}} maintainers.
+
+{{< admonition type="note" >}}
+The resource usage depends on the workload, hardware, and the configuration used.
+The information on this page is a good starting point for most users, but your actual usage may be different.
+{{< /admonition >}}
+
+## Prometheus metrics
+
+The Prometheus metrics resource usage depends mainly on the number of active series that need to be scraped and the scrape interval.
+
+As a rule of thumb, **for each 1 million active series** and with the default scrape interval, you can expect to use approximately:
+
+* 0.4 CPU cores
+* 11 GiB of memory
+* 1.5 MiB/s of total network bandwidth, send and receive
+
+These recommendations are based on deployments that use [clustering][], but they will broadly apply to other deployment modes.
+Refer to [Deploy {{< param "PRODUCT_NAME" >}}][deploy] for more information on how to deploy {{< param "PRODUCT_NAME" >}}.
+
+## Loki logs
+
+Loki logs resource usage depends mainly on the volume of logs ingested.
+
+As a rule of thumb, **for each 1 MiB/second of logs ingested**, you can expect to use approximately:
+
+* 1 CPU core
+* 120 MiB of memory
+
+These recommendations are based on Kubernetes DaemonSet deployments on clusters with a relatively small number of nodes and a high log volume on each.
+The resource usage for each 1 MiB/second of logs can be higher if you have a large number of small nodes, due to the constant overhead of running {{< param "PRODUCT_NAME" >}} on each node.
+
+Additionally, factors such as the number of labels, the number of files, and the average log line length may all play a role in the resource usage.
+
+## Pyroscope profiles
+
+Pyroscope profiles resource usage depends mainly on the volume of profiles.
+
+As a rule of thumb, **for each 100 profiles/second**, you can expect to use approximately:
+
+* 1 CPU core
+* 10 GiB of memory
+
+Factors such as the size of each profile and the frequency of fetching them also play a role in the overall resource usage.
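+
+Putting these rules of thumb together with a worked example: a hypothetical fleet that scrapes 2 million active series and ingests 0.5 MiB/second of logs would need roughly 0.8 CPU cores and 22 GiB of memory for metrics (2 × 0.4 cores, 2 × 11 GiB), plus about 0.5 CPU cores and 60 MiB of memory for logs (0.5 × 1 core, 0.5 × 120 MiB), spread across the fleet. Treat numbers like these as starting points for load testing, not as guarantees.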
+ +[deploy]: ../../get-started/deploy-alloy/ +[clustering]: ../../concepts/clustering/ diff --git a/docs/sources/tasks/migrate/_index.md b/docs/sources/tasks/migrate/_index.md new file mode 100644 index 0000000000..9ee1f3238a --- /dev/null +++ b/docs/sources/tasks/migrate/_index.md @@ -0,0 +1,13 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tasks/migrate/ +description: How to migrate to Grafana Alloy +menuTitle: Migrate +title: Migrate to Grafana Alloy +weight: 100 +--- + +# How to migrate to {{% param "PRODUCT_NAME" %}} + +This section details how to migrate to {{< param "PRODUCT_NAME" >}} from other common solutions. + +{{< section >}} diff --git a/docs/sources/flow/tasks/migrate/from-operator.md b/docs/sources/tasks/migrate/from-operator.md similarity index 69% rename from docs/sources/flow/tasks/migrate/from-operator.md rename to docs/sources/tasks/migrate/from-operator.md index f035f95484..58c62f792e 100644 --- a/docs/sources/flow/tasks/migrate/from-operator.md +++ b/docs/sources/tasks/migrate/from-operator.md @@ -1,15 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-operator/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-operator/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-operator/ -- /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-operator/ -- ../../getting-started/migrating-from-operator/ # /docs/agent/latest/flow/getting-started/migrating-from-operator/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-operator/ -description: Migrate from Grafana Agent Operator to Grafana Agent Flow +canonical: https://grafana.com/docs/alloy/latest/tasks/migrate/from-operator/ +description: Migrate from Grafana Agent Operator to Grafana Alloy menuTitle: Migrate from Operator -title: Migrate from Grafana Agent Operator to Grafana Agent Flow +title: Migrate from Grafana Agent Operator to Grafana Alloy weight: 320 --- @@ -275,38 +268,24 @@ The logging subsystem is very powerful and has many options for processing logs. ## Integrations The `Integration` CRD isn't supported with {{< param "PRODUCT_NAME" >}}. -However, all static mode integrations have an equivalent component in the [`prometheus.exporter`][] namespace. +However, all static mode integrations have an equivalent component in the [`prometheus.exporter`][prometheus.exporter] namespace. The [reference documentation][component documentation] should help convert those integrations to their {{< param "PRODUCT_NAME" >}} equivalent. 
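+
+For example, a minimal sketch of replacing a `node_exporter` integration with its component equivalent (this assumes your metrics are delivered by a `prometheus.remote_write` component labeled `default`; adjust names to match your configuration):
+
+```river
+// Expose host metrics, equivalent to the node_exporter integration.
+prometheus.exporter.unix "default" { }
+
+// Scrape the exporter and forward the metrics for delivery.
+prometheus.scrape "node" {
+  targets    = prometheus.exporter.unix.default.targets
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+```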
-[default values]: https://github.com/grafana/agent/blob/main/operations/helm/charts/grafana-agent/values.yaml - -{{% docs/reference %}} -[clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering" -[clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering" -[deployment guide]: "/docs/agent/ -> /docs/agent//flow/get-started/deploy-agent" -[deployment guide]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/get-started/deploy-agent" -[operator guide]: "/docs/agent/ -> /docs/agent//operator/deploy-agent-operator-resources.md#deploy-a-metricsinstance-resource" -[operator guide]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/operator/deploy-agent-operator-resources.md#deploy-a-metricsinstance-resource" -[Helm chart]: "/docs/agent/ -> /docs/agent//flow/get-started/install/kubernetes" -[Helm chart]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/get-started/install/kubernetes" -[remote.kubernetes.secret]: "/docs/agent/ -> /docs/agent//flow/reference/components/remote.kubernetes.secret.md" -[remote.kubernetes.secret]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/remote.kubernetes.secret.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/prometheus.remote_write.md" -[prometheus.operator.podmonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.podmonitors.md" -[prometheus.operator.podmonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/prometheus.operator.podmonitors.md" -[prometheus.operator.servicemonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.servicemonitors.md" -[prometheus.operator.servicemonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/prometheus.operator.servicemonitors.md" -[prometheus.operator.probes]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.probes.md" -[prometheus.operator.probes]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/prometheus.operator.probes.md" -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/prometheus.scrape" -[loki.source.kubernetes]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.source.kubernetes.md" -[loki.source.kubernetes]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/loki.source.kubernetes.md" -[loki.source.podlogs]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.source.podlogs.md" -[loki.source.podlogs]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/loki.source.podlogs.md" -[component documentation]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[component documentation]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components" -[`prometheus.exporter`]: "/docs/agent/ -> /docs/agent//flow/reference/components" -[`prometheus.exporter`]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components" -{{% /docs/reference %}} + +[default values]: https://github.com/grafana/alloy/blob/main/operations/helm/charts/grafana-agent/values.yaml +[clustering]: 
../../../concepts/clustering/ +[deployment guide]: ../../../get-started/deploy-alloy + +[operator guide]: https://grafana.com/docs/agent/latest/operator/deploy-agent-operator-resources/#deploy-a-metricsinstance-resource + +[Helm chart]: ../../../get-started/install/kubernetes/ +[remote.kubernetes.secret]: ../../../reference/components/remote.kubernetes.secret/ +[prometheus.remote_write]: ../../../reference/components/prometheus.remote_write/ +[prometheus.operator.podmonitors]: ../../../reference/components/prometheus.operator.podmonitors/ +[prometheus.operator.servicemonitors]: ../../../reference/components/prometheus.operator.servicemonitors/ +[prometheus.operator.probes]: ../../../reference/components/prometheus.operator.probes/ +[prometheus.scrape]: ../../../reference/components/prometheus.scrape/ +[loki.source.kubernetes]: ../../../reference/components/loki.source.kubernetes/ +[loki.source.podlogs]: ../../../reference/components/loki.source.podlogs/ +[component documentation]: ../../../reference/components/ +[prometheus.exporter]: ../../../reference/components/ diff --git a/docs/sources/flow/tasks/migrate/from-prometheus.md b/docs/sources/tasks/migrate/from-prometheus.md similarity index 71% rename from docs/sources/flow/tasks/migrate/from-prometheus.md rename to docs/sources/tasks/migrate/from-prometheus.md index 84241791ec..d5bdc6bd82 100644 --- a/docs/sources/flow/tasks/migrate/from-prometheus.md +++ b/docs/sources/tasks/migrate/from-prometheus.md @@ -1,19 +1,8 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tasks/migrate/from-prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/from-prometheus/ -- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-prometheus/ -# Previous page aliases for backwards compatibility: -- /docs/grafana-cloud/agent/flow/getting-started/migrating-from-prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-prometheus/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-prometheus/ -- /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-prometheus/ -- ../../getting-started/migrating-from-prometheus/ # /docs/agent/latest/flow/getting-started/migrating-from-prometheus/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-prometheus/ -description: Learn how to migrate from Prometheus to Grafana Agent Flow +canonical: https://grafana.com/docs/alloy/latest/tasks/migrate/from-prometheus/ +description: Learn how to migrate from Prometheus to Grafana Alloy menuTitle: Migrate from Prometheus -title: Migrate from Prometheus to Grafana Agent Flow +title: Migrate from Prometheus to Grafana Alloy weight: 320 --- @@ -71,10 +60,10 @@ This conversion will enable you to take full advantage of the many additional fe 1. If the `convert` command can't convert a Prometheus configuration, diagnostic information is sent to `stderr`.\ You can bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion by including the `--bypass-errors` flag. - {{< admonition type="caution" >}} - If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration. - Make sure you fully test the converted configuration before using it in a production environment. 
- {{< /admonition >}}
+ {{< admonition type="caution" >}}
+ If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration.
+ Make sure you fully test the converted configuration before using it in a production environment.
+ {{< /admonition >}}

{{< code >}}

@@ -131,14 +120,14 @@ This allows you to try {{< param "PRODUCT_NAME" >}} without modifying your exist

> In this task, you will use the [run][] CLI command to run {{< param "PRODUCT_NAME" >}}
> using a Prometheus configuration.

-[Run][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=prometheus`.
+[Run][run alloy] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=prometheus`.
Your configuration file must be a valid Prometheus configuration file rather than a {{< param "PRODUCT_NAME" >}} configuration file.

### Debugging

1. You can follow the convert CLI command [debugging][] instructions to generate a diagnostic report.

-1. Refer to the {{< param "PRODUCT_NAME" >}} [Debugging][DebuggingUI] for more information about a running {{< param "PRODUCT_NAME" >}}.
+1. Refer to [Debug {{< param "PRODUCT_NAME" >}}][DebuggingUI] for more information about debugging a running {{< param "PRODUCT_NAME" >}}.

1. If your Prometheus configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`.
-   You can bypass any non-critical issues and start the Agent by including the `--config.bypass-conversion-errors` flag in addition to `--config.format=prometheus`.
+   You can bypass any non-critical issues and start {{< param "PRODUCT_NAME" >}} by including the `--config.bypass-conversion-errors` flag in addition to `--config.format=prometheus`.

@@ -244,24 +233,12 @@ The following list is specific to the convert command and not {{< param "PRODUCT

[Prometheus]: https://prometheus.io/docs/prometheus/latest/configuration/configuration/
[debugging]: #debugging
[example]: #example
-
-{{% docs/reference %}}
-[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md"
-[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md"
-[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md"
-[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write.md"
-[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md"
-[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md"
-[convert]: "/docs/agent/ -> /docs/agent//flow/reference/cli/convert.md"
-[convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert.md"
-[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md"
-[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md"
-[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/"
-[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/"
-[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md"
-[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md"
-[River]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/_index.md"
-[River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/_index.md"
-[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug#grafana-agent-flow-ui"
-[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug#grafana-agent-flow-ui"
-{{% /docs/reference %}}
+[prometheus.scrape]: ../../../reference/components/prometheus.scrape/
+[prometheus.remote_write]: ../../../reference/components/prometheus.remote_write/
+[Components]: ../../../concepts/components/
+[convert]: ../../../reference/cli/convert/
+[run]: ../../../reference/cli/run/
+[run alloy]: ../../../get-started/run/
+[DebuggingUI]: ../../debug/
+[River]: ../../../concepts/config-language/
+[UI]: ../../debug/#grafana-alloy-ui
diff --git a/docs/sources/flow/tasks/migrate/from-promtail.md b/docs/sources/tasks/migrate/from-promtail.md
similarity index 73%
rename from docs/sources/flow/tasks/migrate/from-promtail.md
rename to docs/sources/tasks/migrate/from-promtail.md
index 7a0dda9b92..6699ed03f2 100644
--- a/docs/sources/flow/tasks/migrate/from-promtail.md
+++ b/docs/sources/tasks/migrate/from-promtail.md
@@ -1,19 +1,8 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/migrate/from-promtail/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-promtail/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/from-promtail/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-promtail/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/getting-started/migrating-from-promtail/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-promtail/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-promtail/
-- /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-promtail/
-- ../../getting-started/migrating-from-promtail/ # /docs/agent/latest/flow/getting-started/migrating-from-promtail/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-promtail/
-description: Learn how to migrate from Promtail to Grafana Agent Flow
+canonical: https://grafana.com/docs/alloy/latest/tasks/migrate/from-promtail/
+description: Learn how to migrate from Promtail to Grafana Alloy
 menuTitle: Migrate from Promtail
-title: Migrate from Promtail to Grafana Agent Flow
+title: Migrate from Promtail to Grafana Alloy
 weight: 330
 ---

@@ -64,7 +53,7 @@ This conversion will enable you to take full advantage of the many additional fe

   * _``_: The full path to the Promtail configuration.
   * _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration.

-1. [Run][] {{< param "PRODUCT_NAME" >}} using the new configuration from _``_:
+1. [Run][run alloy] {{< param "PRODUCT_NAME" >}} using the new configuration from _``_:

 ### Debugging

@@ -127,7 +116,7 @@ This allows you to try {{< param "PRODUCT_NAME" >}} without modifying your exist

 > In this task, you will use the [run][] CLI command to run {{< param "PRODUCT_NAME" >}} using a Promtail configuration.

-[Run][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=promtail`.
+[Run][run alloy] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=promtail`.
 Your configuration file must be a valid Promtail configuration file rather than a {{< param "PRODUCT_NAME" >}} configuration file.
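+
+As a rough sketch of what the conversion produces (assuming a Promtail configuration that tails `/var/log/*.log` and pushes to a Loki instance at `localhost:3100`; component labels and the exact output depend on your input), the result chains file matching, tailing, and writing components:
+
+```river
+// Discover the files the original scrape_config tailed.
+local.file_match "example" {
+  path_targets = [{"__path__" = "/var/log/*.log"}]
+}
+
+// Tail the discovered files and forward log entries.
+loki.source.file "example" {
+  targets    = local.file_match.example.targets
+  forward_to = [loki.write.default.receiver]
+}
+
+// Push entries to the original Loki endpoint.
+loki.write "default" {
+  endpoint {
+    url = "http://localhost:3100/loki/api/v1/push"
+  }
+}
+```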
### Debugging

@@ -225,26 +214,13 @@ The following list is specific to the convert command and not {{< param "PRODUCT

 [Promtail]: https://www.grafana.com/docs/loki//clients/promtail/
 [debugging]: #debugging
 [expanded in the configuration file]: https://www.grafana.com/docs/loki//clients/promtail/configuration/#use-environment-variables-in-the-configuration
-
-{{% docs/reference %}}
-[local.file_match]: "/docs/agent/ -> /docs/agent//flow/reference/components/local.file_match.md"
-[local.file_match]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/local.file_match.md"
-[loki.source.file]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.source.file.md"
-[loki.source.file]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.file.md"
-[loki.write]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.write.md"
-[loki.write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.write.md"
-[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md"
-[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md"
-[convert]: "/docs/agent/ -> /docs/agent//flow/reference/cli/convert.md"
-[convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert.md"
-[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md"
-[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md"
-[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/"
-[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/"
-[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md"
-[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md"
-[River]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/_index.md"
-[River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/_index.md"
-[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug#grafana-agent-flow-ui"
-[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug#grafana-agent-flow-ui"
-{{% /docs/reference %}}
+[local.file_match]: ../../../reference/components/local.file_match/
+[loki.source.file]: ../../../reference/components/loki.source.file/
+[loki.write]: ../../../reference/components/loki.write/
+[Components]: ../../../concepts/components/
+[convert]: ../../../reference/cli/convert/
+[run]: ../../../reference/cli/run/
+[run alloy]: ../../../get-started/run/
+[DebuggingUI]: ../../../tasks/debug/
+[River]: ../../../concepts/config-language/
+[UI]: ../../debug/#grafana-alloy-ui
diff --git a/docs/sources/flow/tasks/migrate/from-static.md b/docs/sources/tasks/migrate/from-static.md
similarity index 60%
rename from docs/sources/flow/tasks/migrate/from-static.md
rename to docs/sources/tasks/migrate/from-static.md
index 5d1b73626f..0e82ff92ac 100644
--- a/docs/sources/flow/tasks/migrate/from-static.md
+++ b/docs/sources/tasks/migrate/from-static.md
@@ -1,25 +1,14 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/migrate/from-static/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-static/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/from-static/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-static/
-# Previous page aliases for backwards compatibility:
--
/docs/grafana-cloud/agent/flow/getting-started/migrating-from-static/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-static/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-static/ -- /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-static/ -- ../../getting-started/migrating-from-static/ # /docs/agent/latest/flow/getting-started/migrating-from-static/ -canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-static/ -description: Learn how to migrate your configuration from Grafana Agent Static to Grafana Agent Flow -menuTitle: Migrate from Static to Flow -title: Migrate Grafana Agent Static to Grafana Agent Flow +canonical: https://grafana.com/docs/alloy/latest/tasks/migrate/from-static/ +description: Learn how to migrate your configuration from Grafana Agent Static to Grafana Alloy +menuTitle: Migrate from Grafana Agent Static +title: Migrate Grafana Agent Static to Grafana Alloy weight: 340 --- -# Migrate from {{% param "PRODUCT_ROOT_NAME" %}} Static to {{% param "PRODUCT_NAME" %}} +# Migrate from Grafana Agent Static to {{% param "PRODUCT_NAME" %}} -The built-in {{< param "PRODUCT_ROOT_NAME" >}} convert command can migrate your [Static][] configuration to a {{< param "PRODUCT_NAME" >}} configuration. +The built-in {{< param "PRODUCT_ROOT_NAME" >}} convert command can migrate your [Grafana Agent Static][Static] configuration to a {{< param "PRODUCT_NAME" >}} configuration. This topic describes how to: @@ -42,7 +31,7 @@ This topic describes how to: ## Convert a Grafana Agent Static configuration -To fully migrate Grafana Agent [Static][] to {{< param "PRODUCT_NAME" >}}, you must convert your Static configuration into a {{< param "PRODUCT_NAME" >}} configuration. +To fully migrate Grafana Agent Static to {{< param "PRODUCT_NAME" >}}, you must convert your Grafana Agent Static configuration into a {{< param "PRODUCT_NAME" >}} configuration. This conversion will enable you to take full advantage of the many additional features available in {{< param "PRODUCT_NAME" >}}. > In this task, you will use the [convert][] CLI command to output a {{< param "PRODUCT_NAME" >}} @@ -64,14 +53,14 @@ This conversion will enable you to take full advantage of the many additional fe Replace the following: - * _``_: The full path to the [Static][] configuration. + * _``_: The full path to the Grafana Agent Static configuration. * _`_`: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. -1. [Run][] {{< param "PRODUCT_NAME" >}} using the new {{< param "PRODUCT_NAME" >}} configuration from _``_: +1. [Run][run alloy] {{< param "PRODUCT_NAME" >}} using the new {{< param "PRODUCT_NAME" >}} configuration from _``_: ### Debugging -1. If the convert command can't convert a [Static][] configuration, diagnostic information is sent to `stderr`. +1. If the convert command can't convert a Grafana Agent Static configuration, diagnostic information is sent to `stderr`. You can use the `--bypass-errors` flag to bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion. {{< admonition type="caution" >}} @@ -93,7 +82,7 @@ This conversion will enable you to take full advantage of the many additional fe Replace the following: - * _``_: The full path to the [Static][] configuration. + * _``_: The full path to the Grafana Agent Static configuration. 
* _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. 1. You can use the `--report` flag to output a diagnostic report. @@ -112,7 +101,7 @@ This conversion will enable you to take full advantage of the many additional fe Replace the following: - * _``_: The full path to the [Static][] configuration. + * _``_: The full path to the Grafana Agent Static configuration. * _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. * _``_: The output path for the report. @@ -122,16 +111,16 @@ This conversion will enable you to take full advantage of the many additional fe (Warning) Please review your agent command line flags and ensure they are set in your {{< param "PRODUCT_NAME" >}} configuration file where necessary. ``` -## Run a Static mode configuration +## Run a Grafana Agent Static mode configuration If you’re not ready to completely switch to a {{< param "PRODUCT_NAME" >}} configuration, you can run {{< param "PRODUCT_ROOT_NAME" >}} using your existing Grafana Agent Static configuration. -The `--config.format=static` flag tells {{< param "PRODUCT_ROOT_NAME" >}} to convert your [Static] configuration to {{< param "PRODUCT_NAME" >}} and load it directly without saving the new configuration. +The `--config.format=static` flag tells {{< param "PRODUCT_ROOT_NAME" >}} to convert your Grafana Agent Static configuration to {{< param "PRODUCT_NAME" >}} and load it directly without saving the new configuration. This allows you to try {{< param "PRODUCT_NAME" >}} without modifying your existing Grafana Agent Static configuration infrastructure. > In this task, you will use the [run][] CLI command to run {{< param "PRODUCT_NAME" >}} using a Static configuration. [Run][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=static`. -Your configuration file must be a valid [Static] configuration file. +Your configuration file must be a valid Grafana Agent Static configuration file. ### Debugging @@ -139,7 +128,7 @@ Your configuration file must be a valid [Static] configuration file. 1. Refer to the {{< param "PRODUCT_NAME" >}} [debugging UI][DebuggingUI] for more information about running {{< param "PRODUCT_NAME" >}}. -1. If your [Static] configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`. +1. If your Grafana Agent Static configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`. You can use the `--config.bypass-conversion-errors` flag with `--config.format=static` to bypass any non-critical issues and start {{< param "PRODUCT_NAME" >}}. {{< admonition type="caution" >}} @@ -149,9 +138,9 @@ Your configuration file must be a valid [Static] configuration file. ## Example -This example demonstrates converting a [Static] configuration file to a {{< param "PRODUCT_NAME" >}} configuration file. +This example demonstrates converting a Grafana Agent Static configuration file to a {{< param "PRODUCT_NAME" >}} configuration file. -The following [Static] configuration file provides the input for the conversion. +The following Grafana Agent Static configuration file provides the input for the conversion. ```yaml server: @@ -223,7 +212,7 @@ grafana-agent-flow convert --source-format=static --output= Replace the following: -* _``_: The full path to the [Static][] configuration. +* _``_: The full path to the Grafana Agent Static configuration. 
* _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. The new {{< param "PRODUCT_NAME" >}} configuration file looks like this: @@ -320,9 +309,9 @@ grafana-agent-flow convert --source-format=static --extra-args="-enable-features {{< /code >}} Replace the following: - * _``_: The full path to the [Static][] configuration. + * _``_: The full path to the Grafana Agent Static configuration. * _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. - + ## Environment Vars You can use the `-config.expand-env` command line flag to interpret environment variables in your Grafana Agent Static configuration. @@ -343,7 +332,7 @@ The following list is specific to the convert command and not {{< param "PRODUCT Any additional unsupported features are returned as errors during conversion. * There is no gRPC server to configure for {{< param "PRODUCT_NAME" >}}, as any non-default configuration will show as unsupported during the conversion. * Check if you are using any extra command line arguments with Static that aren't present in your configuration file. For example, `-server.http.address`. -* Check if you are using any environment variables in your [Static][] configuration. +* Check if you are using any environment variables in your Grafana Agent Static configuration. These will be evaluated during conversion and you may want to replace them with the {{< param "PRODUCT_NAME" >}} Standard library [env][] function after conversion. * Review additional [Prometheus Limitations][] for limitations specific to your [Metrics][] configuration. * Review additional [Promtail Limitations][] for limitations specific to your [Logs][] configuration. @@ -353,49 +342,33 @@ The following list is specific to the convert command and not {{< param "PRODUCT [debugging]: #debugging [example]: #example -{{% docs/reference %}} -[Static]: "/docs/agent/ -> /docs/agent//static" -[Static]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static" -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write.md" -[local.file_match]: "/docs/agent/ -> /docs/agent//flow/reference/components/local.file_match.md" -[local.file_match]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/local.file_match.md" -[loki.process]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.process.md" -[loki.process]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.process.md" -[loki.source.file]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.source.file.md" -[loki.source.file]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.source.file.md" -[loki.write]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.write.md" -[loki.write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.write.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" -[convert]: 
"/docs/agent/ -> /docs/agent//flow/reference/cli/convert.md" -[convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert.md" -[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" -[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/" -[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/" -[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md" -[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md" -[River]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/" -[River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/" -[Integrations next]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/integrations-next/_index.md" -[Integrations next]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/traces-config.md -[Traces]: "/docs/agent/ -> /docs/agent//static/configuration/traces-config.md" -[Traces]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/traces-config.md" -[Agent Management]: "/docs/agent/ -> /docs/agent//static/configuration/agent-management.md" -[Agent Management]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/agent-management.md" -[env]: "/docs/agent/ -> /docs/agent//flow/reference/stdlib/env.md" -[env]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/env.md" -[Prometheus Limitations]: "/docs/agent/ -> /docs/agent//flow/tasks/migrate/from-prometheus.md#limitations" -[Prometheus Limitations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-prometheus.md#limitations" -[Promtail Limitations]: "/docs/agent/ -> /docs/agent//flow/tasks/migrate/from-promtail.md#limitations" -[Promtail Limitations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-promtail.md#limitations" -[Metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config.md" -[Metrics]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/metrics-config.md" -[Logs]: "/docs/agent/ -> /docs/agent//static/configuration/logs-config.md" -[Logs]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/logs-config.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug#grafana-agent-flow-ui" -{{% /docs/reference %}} + +[Static]: https://grafana.com/docs/agent/latest/static + +[prometheus.scrape]: ../../../reference/components/prometheus.scrape/ +[prometheus.remote_write]: ../../../reference/components/prometheus.remote_write/ +[local.file_match]: ../../../reference/components/local.file_match/ +[loki.process]: ../../../reference/components/loki.process/ +[loki.source.file]: ../../../reference/components/loki.source.file/ +[loki.write]: ../../../reference/components/loki.write/ +[Components]: ../../../concepts/components/ +[convert]: ../../../reference/cli/convert/ +[run]: ../../../reference/cli/run/ +[run alloy]: ../../../get-started/run/ +[DebuggingUI]: ../../debug/ +[River]: ../../../concepts/config-language/ + + +[Integrations next]: https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/ +[Traces]: 
https://grafana.com/docs/agent/latest/static/configuration/traces-config/
+[Agent Management]: https://grafana.com/docs/agent/latest/static/configuration/agent-management/
+
+[env]: ../../../reference/stdlib/env/
+[Prometheus Limitations]: ../from-prometheus/#limitations
+[Promtail Limitations]: ../from-promtail/#limitations
+
+
+[Metrics]: https://grafana.com/docs/agent/latest/static/configuration/metrics-config/
+[Logs]: https://grafana.com/docs/agent/latest/static/configuration/logs-config/
+
+[UI]: ../../debug/#grafana-alloy-ui
diff --git a/docs/sources/tasks/monitor/_index.md b/docs/sources/tasks/monitor/_index.md
new file mode 100644
index 0000000000..2dd265cfe1
--- /dev/null
+++ b/docs/sources/tasks/monitor/_index.md
@@ -0,0 +1,13 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/monitor/
+description: Learn about monitoring Grafana Alloy
+title: Monitor Grafana Alloy
+menuTitle: Monitor
+weight: 110
+---
+
+# How to monitor {{% param "PRODUCT_NAME" %}}
+
+This section details various ways to monitor and debug {{< param "PRODUCT_NAME" >}}.
+
+{{< section >}}
diff --git a/docs/sources/tasks/monitor/component_metrics.md b/docs/sources/tasks/monitor/component_metrics.md
new file mode 100644
index 0000000000..65cdf81261
--- /dev/null
+++ b/docs/sources/tasks/monitor/component_metrics.md
@@ -0,0 +1,28 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/monitor/component_metrics/
+description: Learn how to monitor component metrics
+title: Monitor components
+weight: 200
+---
+
+# How to monitor components
+
+{{< param "PRODUCT_NAME" >}} [components][] may optionally expose Prometheus metrics which can be used to investigate the behavior of that component.
+These component-specific metrics are only generated when an instance of that component is running.
+
+> Component-specific metrics are different from any metrics being processed by the component.
+> Component-specific metrics are used to expose the state of a component for observability, alerting, and debugging.
+
+Component-specific metrics are exposed at the `/metrics` HTTP endpoint of the {{< param "PRODUCT_NAME" >}} HTTP server, which defaults to listening on `http://localhost:12345`.
+
+> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic.
+
+Component-specific metrics have a `component_id` label matching the component ID generating those metrics.
+For example, component-specific metrics for a `prometheus.remote_write` component labeled `production` will have a `component_id` label with the value `prometheus.remote_write.production`.
+
+The [reference documentation][] for each component describes the list of component-specific metrics that the component exposes.
+Not all components expose metrics.
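+
+For example, a minimal sketch of collecting these self-monitoring metrics with a scrape component (the `prometheus.remote_write.default` receiver in `forward_to` is an assumption; point it at whatever delivery component you use):
+
+```river
+// Scrape the /metrics endpoint of the local HTTP server.
+prometheus.scrape "self" {
+  targets    = [{"__address__" = "localhost:12345"}]
+  forward_to = [prometheus.remote_write.default.receiver]
+}
+```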
+
+[components]: ../../../concepts/components/
+[grafana-agent run]: ../../../reference/cli/run/
+[reference documentation]: ../../../reference/components/
diff --git a/docs/sources/tasks/monitor/controller_metrics.md b/docs/sources/tasks/monitor/controller_metrics.md
new file mode 100644
index 0000000000..6ce2bf5010
--- /dev/null
+++ b/docs/sources/tasks/monitor/controller_metrics.md
@@ -0,0 +1,27 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tasks/monitor/controller_metrics/
+description: Learn how to monitor controller metrics
+title: Monitor controller
+weight: 100
+---
+
+# How to monitor the controller
+
+The {{< param "PRODUCT_NAME" >}} [component controller][] exposes Prometheus metrics that you can use to investigate the controller state.
+
+Metrics for the controller are exposed at the `/metrics` HTTP endpoint of the {{< param "PRODUCT_NAME" >}} HTTP server, which defaults to listening on `http://localhost:12345`.
+
+> The documentation for the [`grafana-agent run`][grafana-agent run] command describes how to modify the address {{< param "PRODUCT_NAME" >}} listens on for HTTP traffic.
+
+The controller exposes the following metrics:
+
+* `agent_component_controller_evaluating` (Gauge): Set to `1` whenever the component controller is currently evaluating components.
+  This value may be misrepresented depending on how fast evaluations complete or how often evaluations occur.
+* `agent_component_controller_running_components` (Gauge): The current number of running components by health.
+  The health is represented in the `health_type` label.
+* `agent_component_evaluation_seconds` (Histogram): The time it takes to evaluate components after one of their dependencies is updated.
+* `agent_component_dependencies_wait_seconds` (Histogram): Time spent by components waiting to be evaluated after one of their dependencies is updated.
+* `agent_component_evaluation_queue_size` (Gauge): The current number of component evaluations waiting to be performed.
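+
+As a usage sketch, a hypothetical alert rule could fire on an expression such as `agent_component_controller_running_components{health_type="unhealthy"} > 0` to surface components that are running but unhealthy (an assumption for illustration; check the `health_type` values your version reports before relying on them).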
+
+[component controller]: ../../../concepts/component_controller/
+[grafana-agent run]: ../../../reference/cli/run/
diff --git a/docs/sources/flow/tasks/opentelemetry-to-lgtm-stack.md b/docs/sources/tasks/opentelemetry-to-lgtm-stack.md
similarity index 76%
rename from docs/sources/flow/tasks/opentelemetry-to-lgtm-stack.md
rename to docs/sources/tasks/opentelemetry-to-lgtm-stack.md
index 2da9790783..7d78626a36 100644
--- a/docs/sources/flow/tasks/opentelemetry-to-lgtm-stack.md
+++ b/docs/sources/tasks/opentelemetry-to-lgtm-stack.md
@@ -1,18 +1,6 @@
 ---
-aliases:
-- /docs/grafana-cloud/agent/flow/tasks/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/send-data/agent/flow/tasks/opentelemetry-to-lgtm-stack/
-# Previous page aliases for backwards compatibility:
-- /docs/grafana-cloud/agent/flow/getting-started/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/opentelemetry-to-lgtm-stack/
-- /docs/grafana-cloud/send-data/agent/flow/getting-started/opentelemetry-to-lgtm-stack/
-- ../getting-started/opentelemetry-to-lgtm-stack/ # /docs/agent/latest/flow/getting-started/opentelemetry-to-lgtm-stack/
-canonical: https://grafana.com/docs/agent/latest/flow/tasks/opentelemetry-to-lgtm-stack/
-description: Learn how to collect OpenTelemetry data and forward it to the Grafana
-  stack
+canonical: https://grafana.com/docs/alloy/latest/tasks/opentelemetry-to-lgtm-stack/
+description: Learn how to collect OpenTelemetry data and forward it to the Grafana stack
 title: OpenTelemetry to Grafana stack
 weight: 350
 ---

@@ -44,7 +32,8 @@ This topic describes how to:

 * Have a set of OpenTelemetry applications ready to push telemetry data to {{< param "PRODUCT_NAME" >}}.
 * Identify where {{< param "PRODUCT_NAME" >}} will write received telemetry data.
 * Be familiar with the concept of [Components][] in {{< param "PRODUCT_NAME" >}}.
-* Complete the [Collect open telemetry data][] task. You will pick up from where that guide ended.
+* Complete the [Collect open telemetry data][] task.
+  You will pick up from where that guide ended.

 ## The pipeline

@@ -98,7 +87,7 @@ Traces: OTel → batch processor → OTel exporter

 ## Grafana Loki

 [Grafana Loki][] is a horizontally scalable, highly available, multi-tenant log aggregation system inspired by Prometheus.
-Similar to Prometheus, to send from OTLP to Loki, you can do a passthrough from the [otelcol.exporter.loki] component to [loki.write] component.
+Similar to Prometheus, to send from OTLP to Loki, you can do a passthrough from the [otelcol.exporter.loki][] component to the [loki.write][] component.
```river otelcol.exporter.loki "default" { @@ -310,7 +299,7 @@ ts=2023-05-09T09:37:15.304109Z component=otelcol.receiver.otlp.default level=inf ts=2023-05-09T09:37:15.304234Z component=otelcol.receiver.otlp.default level=info msg="Starting HTTP server" endpoint=0.0.0.0:4318 ``` -You can now check the pipeline graphically by visiting http://localhost:12345/graph +You can now check the pipeline graphically by visiting [http://localhost:12345/graph][] ![](../../../assets/tasks/otlp-lgtm-graph.png) @@ -320,26 +309,14 @@ You can now check the pipeline graphically by visiting http://localhost:12345/gr [Grafana Cloud Portal]: https://grafana.com/docs/grafana-cloud/account-management/cloud-portal#your-grafana-cloud-stack [Prometheus Remote Write]: https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage [Grafana Mimir]: https://grafana.com/oss/mimir/ - -{{% docs/reference %}} -[Collect open telemetry data]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md" -[Collect open telemetry data]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/collect-opentelemetry-data.md" -[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" -[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" -[loki.write]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.write.md" -[loki.write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/loki.write.md" -[otelcol.auth.basic]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.auth.basic.md" -[otelcol.auth.basic]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.auth.basic.md" -[otelcol.exporter.loki]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.loki.md" -[otelcol.exporter.loki]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.loki.md" -[otelcol.exporter.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.otlp.md" -[otelcol.exporter.otlp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.otlp.md" -[otelcol.exporter.prometheus]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.prometheus.md" -[otelcol.exporter.prometheus]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.exporter.prometheus.md" -[otelcol.processor.batch]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.processor.batch.md" -[otelcol.processor.batch]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.batch.md" -[otelcol.receiver.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.receiver.otlp.md" -[otelcol.receiver.otlp]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.receiver.otlp.md" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write.md" -{{% /docs/reference %}} +[Collect open telemetry data]: ../collect-opentelemetry-data/ +[Components]: ../../concepts/components/ +[loki.write]: ../../reference/components/loki.write/ +[otelcol.auth.basic]: ../../reference/components/otelcol.auth.basic/ 
+[otelcol.exporter.loki]: ../../reference/components/otelcol.exporter.loki/ +[otelcol.exporter.otlp]: ../../reference/components/otelcol.exporter.otlp/ +[otelcol.exporter.prometheus]: ../../reference/components/otelcol.exporter.prometheus/ +[otelcol.processor.batch]: ../../reference/components/otelcol.processor.batch/ +[otelcol.receiver.otlp]: ../../reference/components/otelcol.receiver.otlp/ +[prometheus.remote_write]: ../../reference/components/prometheus.remote_write/ +[http://localhost:12345/graph]: http://localhost:12345/graph diff --git a/docs/sources/tutorials/_index.md b/docs/sources/tutorials/_index.md new file mode 100644 index 0000000000..03760aa536 --- /dev/null +++ b/docs/sources/tutorials/_index.md @@ -0,0 +1,12 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tutorials/ +description: Learn how to use Grafana Alloy +title: Tutorials +weight: 300 +--- + +# Tutorials + +This section provides tutorials for learning how to use {{< param "PRODUCT_NAME" >}}. + +{{< section >}} diff --git a/docs/sources/flow/tutorials/assets/docker-compose.yaml b/docs/sources/tutorials/assets/docker-compose.yaml similarity index 100% rename from docs/sources/flow/tutorials/assets/docker-compose.yaml rename to docs/sources/tutorials/assets/docker-compose.yaml diff --git a/docs/sources/flow/tutorials/assets/flow_configs/agent.river b/docs/sources/tutorials/assets/flow_configs/agent.river similarity index 100% rename from docs/sources/flow/tutorials/assets/flow_configs/agent.river rename to docs/sources/tutorials/assets/flow_configs/agent.river diff --git a/docs/sources/flow/tutorials/assets/flow_configs/example.river b/docs/sources/tutorials/assets/flow_configs/example.river similarity index 100% rename from docs/sources/flow/tutorials/assets/flow_configs/example.river rename to docs/sources/tutorials/assets/flow_configs/example.river diff --git a/docs/sources/flow/tutorials/assets/flow_configs/multiple-inputs.river b/docs/sources/tutorials/assets/flow_configs/multiple-inputs.river similarity index 100% rename from docs/sources/flow/tutorials/assets/flow_configs/multiple-inputs.river rename to docs/sources/tutorials/assets/flow_configs/multiple-inputs.river diff --git a/docs/sources/flow/tutorials/assets/flow_configs/relabel.river b/docs/sources/tutorials/assets/flow_configs/relabel.river similarity index 100% rename from docs/sources/flow/tutorials/assets/flow_configs/relabel.river rename to docs/sources/tutorials/assets/flow_configs/relabel.river diff --git a/docs/sources/flow/tutorials/assets/generate.sh b/docs/sources/tutorials/assets/generate.sh similarity index 100% rename from docs/sources/flow/tutorials/assets/generate.sh rename to docs/sources/tutorials/assets/generate.sh diff --git a/docs/sources/flow/tutorials/assets/grafana/config/grafana.ini b/docs/sources/tutorials/assets/grafana/config/grafana.ini similarity index 100% rename from docs/sources/flow/tutorials/assets/grafana/config/grafana.ini rename to docs/sources/tutorials/assets/grafana/config/grafana.ini diff --git a/docs/sources/flow/tutorials/assets/grafana/dashboards-provisioning/dashboards.yaml b/docs/sources/tutorials/assets/grafana/dashboards-provisioning/dashboards.yaml similarity index 100% rename from docs/sources/flow/tutorials/assets/grafana/dashboards-provisioning/dashboards.yaml rename to docs/sources/tutorials/assets/grafana/dashboards-provisioning/dashboards.yaml diff --git a/docs/sources/flow/tutorials/assets/grafana/dashboards/agent.json b/docs/sources/tutorials/assets/grafana/dashboards/agent.json 
similarity index 100% rename from docs/sources/flow/tutorials/assets/grafana/dashboards/agent.json rename to docs/sources/tutorials/assets/grafana/dashboards/agent.json diff --git a/docs/sources/flow/tutorials/assets/grafana/dashboards/template.jsonnet b/docs/sources/tutorials/assets/grafana/dashboards/template.jsonnet similarity index 100% rename from docs/sources/flow/tutorials/assets/grafana/dashboards/template.jsonnet rename to docs/sources/tutorials/assets/grafana/dashboards/template.jsonnet diff --git a/docs/sources/flow/tutorials/assets/grafana/datasources/datasource.yml b/docs/sources/tutorials/assets/grafana/datasources/datasource.yml similarity index 100% rename from docs/sources/flow/tutorials/assets/grafana/datasources/datasource.yml rename to docs/sources/tutorials/assets/grafana/datasources/datasource.yml diff --git a/docs/sources/flow/tutorials/assets/mimir/mimir.yaml b/docs/sources/tutorials/assets/mimir/mimir.yaml similarity index 100% rename from docs/sources/flow/tutorials/assets/mimir/mimir.yaml rename to docs/sources/tutorials/assets/mimir/mimir.yaml diff --git a/docs/sources/flow/tutorials/assets/runt.sh b/docs/sources/tutorials/assets/runt.sh similarity index 100% rename from docs/sources/flow/tutorials/assets/runt.sh rename to docs/sources/tutorials/assets/runt.sh diff --git a/docs/sources/flow/tutorials/chaining.md b/docs/sources/tutorials/chaining.md similarity index 64% rename from docs/sources/flow/tutorials/chaining.md rename to docs/sources/tutorials/chaining.md index 9be20dbc3a..63a97cabf6 100644 --- a/docs/sources/flow/tutorials/chaining.md +++ b/docs/sources/tutorials/chaining.md @@ -1,11 +1,5 @@ --- -aliases: -- ./chaining/ -- /docs/grafana-cloud/agent/flow/tutorials/chaining/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/chaining/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/chaining/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/chaining/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/chaining/ +canonical: https://grafana.com/docs/alloy/latest/tutorials/chaining/ description: Learn how to chain Prometheus components menuTitle: Chain Prometheus components title: Chain Prometheus components @@ -16,7 +10,8 @@ weight: 400 This tutorial shows how to use [multiple-inputs.river][] to send data to several different locations. This tutorial uses the same base as [Filtering metrics][]. -A new concept introduced in Flow is chaining components together in a composable pipeline. This promotes the reusability of components while offering flexibility. +A new concept introduced in {{< param "PRODUCT_NAME" >}} is chaining components together in a composable pipeline. +This promotes the reusability of components while offering flexibility. ## Prerequisites @@ -33,10 +28,11 @@ curl https://raw.githubusercontent.com/grafana/agent/main/docs/sources/flow/tuto The `runt.sh` script does: 1. Downloads the configurations necessary for Mimir, Grafana, and {{< param "PRODUCT_ROOT_NAME" >}}. -2. Downloads the docker image for {{< param "PRODUCT_ROOT_NAME" >}} explicitly. -3. Runs the `docker-compose up` command to bring all the services up. +1. Downloads the docker image for {{< param "PRODUCT_ROOT_NAME" >}} explicitly. +1. Runs the `docker-compose up` command to bring all the services up. -Allow {{< param "PRODUCT_ROOT_NAME" >}} to run for two minutes, then navigate to [Grafana][] to see {{< param "PRODUCT_ROOT_NAME" >}} scrape metrics. The [node_exporter][] metrics also show up now. 
+Allow {{< param "PRODUCT_ROOT_NAME" >}} to run for two minutes, then navigate to [Grafana][] to see {{< param "PRODUCT_ROOT_NAME" >}} scrape metrics.
+The [node_exporter][] metrics also show up now.

There are two scrapes each sending metrics to one filter. Note the `job` label lists the full name of the scrape component.

@@ -74,7 +70,8 @@ prometheus.remote_write "prom" {
}
```

-In the Flow block, `prometheus.relabel.service` is being forwarded metrics from two sources `prometheus.scrape.agent` and `prometheus.exporter.unix.default`. This allows for a single relabel component to be used with any number of inputs.
+In the {{< param "PRODUCT_ROOT_NAME" >}} configuration, `prometheus.relabel.service` receives metrics forwarded from two sources: `prometheus.scrape.agent` and `prometheus.exporter.unix.default`.
+This allows for a single relabel component to be used with any number of inputs.

## Adding another relabel

@@ -82,11 +79,7 @@ In `multiple-input.river` add a new `prometheus.relabel` component that adds a `

![Add a new label with the value v2](/media/docs/agent/screenshot-grafana-agent-chaining-scrape-v2.png)

-[multiple-inputs.river]: https://grafana.com/docs/agent//flow/tutorials/assets/flow_configs/multiple-inputs.river
+[multiple-inputs.river]: ../assets/flow_configs/multiple-inputs.river
+[Filtering metrics]: ../filtering-metrics/
[Grafana]: http://localhost:3000/explore?orgId=1&left=%5B%22now-1h%22,%22now%22,%22Mimir%22,%7B%22refId%22:%22A%22,%22instant%22:true,%22range%22:true,%22exemplar%22:true,%22expr%22:%22agent_build_info%7B%7D%22%7D%5D
[node_exporter]: http://localhost:3000/explore?orgId=1&left=%5B%22now-1h%22,%22now%22,%22Mimir%22,%7B%22refId%22:%22A%22,%22instant%22:true,%22range%22:true,%22exemplar%22:true,%22expr%22:%22node_cpu_seconds_total%22%7D%5D
-
-{{% docs/reference %}}
-[Filtering metrics]: "/docs/agent/ -> /docs/agent//flow/tutorials/filtering-metrics.md"
-[Filtering metrics]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tutorials/filtering-metrics.md"
-{{% /docs/reference %}}
diff --git a/docs/sources/flow/tutorials/collecting-prometheus-metrics.md b/docs/sources/tutorials/collecting-prometheus-metrics.md
similarity index 60%
rename from docs/sources/flow/tutorials/collecting-prometheus-metrics.md
rename to docs/sources/tutorials/collecting-prometheus-metrics.md
index a665474190..d5600a4010 100644
--- a/docs/sources/flow/tutorials/collecting-prometheus-metrics.md
+++ b/docs/sources/tutorials/collecting-prometheus-metrics.md
@@ -1,11 +1,5 @@
---
-aliases:
-- ./collecting-prometheus-metrics/
-- /docs/grafana-cloud/agent/flow/tutorials/collecting-prometheus-metrics/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/collecting-prometheus-metrics/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/collecting-prometheus-metrics/
-- /docs/grafana-cloud/send-data/agent/flow/tutorials/collecting-prometheus-metrics/
-canonical: https://grafana.com/docs/agent/latest/flow/tutorials/collecting-prometheus-metrics/
+canonical: https://grafana.com/docs/alloy/latest/tutorials/collecting-prometheus-metrics/
description: Learn how to collect Prometheus metrics
menuTitle: Collect Prometheus metrics
title: Collect Prometheus metrics
@@ -14,7 +8,8 @@ weight: 200

# Collect Prometheus metrics

-{{< param "PRODUCT_ROOT_NAME" >}} is a telemetry collector with the primary goal of moving telemetry data from one location to another. In this tutorial, you'll set up {{< param "PRODUCT_NAME" >}}.
+{{< param "PRODUCT_ROOT_NAME" >}} is a telemetry collector with the primary goal of moving telemetry data from one location to another. +In this tutorial, you'll set up {{< param "PRODUCT_NAME" >}}. ## Prerequisites @@ -31,8 +26,8 @@ curl https://raw.githubusercontent.com/grafana/agent/main/docs/sources/flow/tuto The `runt.sh` script does: 1. Downloads the configurations necessary for Mimir, Grafana, and {{< param "PRODUCT_ROOT_NAME" >}}. -2. Downloads the docker image for {{< param "PRODUCT_ROOT_NAME" >}} explicitly. -3. Runs the docker-compose up command to bring all the services up. +1. Downloads the docker image for {{< param "PRODUCT_ROOT_NAME" >}} explicitly. +1. Runs the docker-compose up command to bring all the services up. Allow {{< param "PRODUCT_ROOT_NAME" >}} to run for two minutes, then navigate to [Grafana][]. @@ -44,7 +39,8 @@ Navigate to `http://localhost:12345/graph` to view the {{< param "PRODUCT_NAME" ![The User Interface](/media/docs/agent/screenshot-grafana-agent-collect-metrics-graph.png) -{{< param "PRODUCT_ROOT_NAME" >}} displays the component pipeline in a dependency graph. See [Scraping component](#scraping-component) and [Remote Write component](#remote-write-component) for details about the components used in this configuration. +{{< param "PRODUCT_ROOT_NAME" >}} displays the component pipeline in a dependency graph. +See [Scraping component](#scraping-component) and [Remote Write component](#remote-write-component) for details about the components used in this configuration. Click the nodes to navigate to the associated component page. There, you can view the state, health information, and, if applicable, the debug information. ![Component information](/media/docs/agent/screenshot-grafana-agent-collect-metrics-comp-info.png) @@ -67,11 +63,14 @@ prometheus.scrape "default" { } ``` -The `prometheus.scrape "default"` annotation indicates the name of the component, `prometheus.scrape`, and its label, `default`. All components must have a unique combination of name and if applicable label. +The `prometheus.scrape "default"` annotation indicates the name of the component, `prometheus.scrape`, and its label, `default`. +All components must have a unique combination of name and if applicable label. -The `targets` [attribute][] is an [argument][]. `targets` is a list of labels that specify the target via the special key `__address__`. The scraper is targeting the {{< param "PRODUCT_NAME" >}} `/metrics` endpoint. Both `http` and `/metrics` are implied but can be overridden. +The `targets` [attribute][] is an [argument][]. `targets` is a list of labels that specify the target via the special key `__address__`. +The scraper is targeting the {{< param "PRODUCT_NAME" >}} `/metrics` endpoint. Both `http` and `/metrics` are implied but can be overridden. -The `forward_to` attribute is an argument that references the [export][] of the `prometheus.remote_write.prom` component. This is where the scraper will send the metrics for further processing. +The `forward_to` attribute is an argument that references the [export][] of the `prometheus.remote_write.prom` component. +This is where the scraper will send the metrics for further processing. 
## Remote Write component @@ -95,16 +94,8 @@ To try out {{< param "PRODUCT_ROOT_NAME" >}} without using Docker: [Docker]: https://www.docker.com/products/docker-desktop [Grafana]: http://localhost:3000/explore?orgId=1&left=%5B%22now-1h%22,%22now%22,%22Mimir%22,%7B%22refId%22:%22A%22,%22instant%22:true,%22range%22:true,%22exemplar%22:true,%22expr%22:%22agent_build_info%7B%7D%22%7D%5D - -{{% docs/reference %}} -[prometheus.scrape]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.scrape.md" -[prometheus.scrape]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.scrape.md" -[attribute]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/#attributes" -[attribute]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/#attributes" -[argument]: "/docs/agent/ -> /docs/agent//flow/concepts/components" -[argument]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components" -[export]: "/docs/agent/ -> /docs/agent//flow/concepts/components" -[export]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components" -[prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" -[prometheus.remote_write]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.remote_write.md" -{{% /docs/reference %}} +[prometheus.scrape]: ../../reference/components/prometheus.scrape/ +[attribute]: ../../concepts/config-language/#attributes +[argument]: ../../concepts/components/ +[export]: ../../concepts/components/ +[prometheus.remote_write]: ../../reference/components/prometheus.remote_write/ diff --git a/docs/sources/flow/tutorials/filtering-metrics.md b/docs/sources/tutorials/filtering-metrics.md similarity index 64% rename from docs/sources/flow/tutorials/filtering-metrics.md rename to docs/sources/tutorials/filtering-metrics.md index ec942124ec..5d36c45c13 100644 --- a/docs/sources/flow/tutorials/filtering-metrics.md +++ b/docs/sources/tutorials/filtering-metrics.md @@ -1,11 +1,5 @@ --- -aliases: -- ./filtering-metrics/ -- /docs/grafana-cloud/agent/flow/tutorials/filtering-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/filtering-metrics/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/filtering-metrics/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/filtering-metrics/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/filtering-metrics/ +canonical: https://grafana.com/docs/alloy/latest/tutorials/filtering-metrics/ description: Learn how to filter Prometheus metrics menuTitle: Filter Prometheus metrics title: Filter Prometheus metrics @@ -14,7 +8,8 @@ weight: 300 # Filter Prometheus metrics -In this tutorial, you'll add a new component [prometheus.relabel][] using [relabel.river][] to filter metrics. This tutorial uses the same base as [Collecting Prometheus metrics][]. +In this tutorial, you'll add a new component [prometheus.relabel][] using [relabel.river][] to filter metrics. +This tutorial uses the same base as [Collect Prometheus metrics][]. 
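+Before the setup steps, here is a hedged preview of the kind of filtering component this tutorial adds. The label name and regex are illustrative assumptions, not the exact contents of `relabel.river`:
+
+```river
+prometheus.relabel "filter" {
+  // Keep only series whose "service" label matches the regex; drop the rest.
+  rule {
+    action        = "keep"
+    source_labels = ["service"]
+    regex         = "api_server.*"
+  }
+
+  // Forward the surviving series onward, for example to a remote write component.
+  forward_to = [prometheus.remote_write.prom.receiver]
+}
+```
+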
## Prerequisites

@@ -53,14 +48,8 @@ Open the `relabel.river` file that was downloaded and change the name of the ser

![Updated dashboard showing api_server_v2](/media/docs/agent/screenshot-grafana-agent-filtering-metrics-transition.png)

-
[Docker]: https://www.docker.com/products/docker-desktop
[Grafana]: http://localhost:3000/explore?orgId=1&left=%5B%22now-1h%22,%22now%22,%22Mimir%22,%7B%22refId%22:%22A%22,%22instant%22:true,%22range%22:true,%22exemplar%22:true,%22expr%22:%22agent_build_info%7B%7D%22%7D%5D
-[relabel.river]: https://grafana.com/docs/agent//flow/tutorials/assets/flow_configs/relabel.river
-
-{{% docs/reference %}}
-[prometheus.relabel]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.relabel.md"
-[prometheus.relabel]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.relabel.md"
-[Collecting Prometheus metrics]: "/docs/agent/ -> /docs/agent//flow/tutorials/collecting-prometheus-metrics.md"
-[Collecting Prometheus metrics]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tutorials/collecting-prometheus-metrics.md"
-{{% /docs/reference %}}
+[relabel.river]: ../assets/flow_configs/relabel.river
+[prometheus.relabel]: ../../reference/components/prometheus.relabel/
+[Collect Prometheus metrics]: ../collecting-prometheus-metrics/
diff --git a/docs/sources/tutorials/flow-by-example/_index.md b/docs/sources/tutorials/flow-by-example/_index.md
new file mode 100644
index 0000000000..33f34193dd
--- /dev/null
+++ b/docs/sources/tutorials/flow-by-example/_index.md
@@ -0,0 +1,12 @@
+---
+canonical: https://grafana.com/docs/alloy/latest/tutorials/flow-by-example/
+description: Learn how to use Grafana Alloy
+title: Flow by example
+weight: 100
+---
+
+# Flow by example
+
+This section provides a set of step-by-step tutorials that show how to use {{< param "PRODUCT_NAME" >}}.
+
+{{< section >}}
diff --git a/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md b/docs/sources/tutorials/flow-by-example/first-components-and-stdlib/index.md
similarity index 58%
rename from docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md
rename to docs/sources/tutorials/flow-by-example/first-components-and-stdlib/index.md
index 59bc59c5d1..363a4e8294 100644
--- a/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md
+++ b/docs/sources/tutorials/flow-by-example/first-components-and-stdlib/index.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/first-components-and-stdlib/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/first-components-and-stdlib/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/first-components-and-stdlib/
-- /docs/grafana-cloud/send-data/agent/flow/tutorials/first-components-and-stdlib/
-canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/first-components-and-stdlib/
+canonical: https://grafana.com/docs/alloy/latest/tutorials/flow-by-example/first-components-and-stdlib/
description: Learn about the basics of River and the configuration language
title: First components and introducing the standard library
weight: 20
@@ -12,20 +7,18 @@ weight: 20

# First components and the standard library

-This tutorial covers the basics of the River language and the standard library. It introduces a basic pipeline that collects metrics from the host and sends them to Prometheus.
+This tutorial covers the basics of the River language and the standard library.
+It introduces a basic pipeline that collects metrics from the host and sends them to Prometheus.

## River basics

-[Configuration language]: https://grafana.com/docs/agent//flow/concepts/config-language/
-[Configuration language concepts]: https://grafana.com/docs/agent//flow/concepts/configuration_language/
-[Standard library documentation]: https://grafana.com/docs/agent//flow/reference/stdlib/
-
**Recommended reading**

- [Configuration language][]
- [Configuration language concepts][]

-[River](https://github.com/grafana/river) is an HCL-inspired configuration language used to configure {{< param "PRODUCT_NAME" >}}. A River file is comprised of three things:
+[River][] is an HCL-inspired configuration language used to configure {{< param "PRODUCT_NAME" >}}.
+A River file is made up of three things:

1. **Attributes**

@@ -37,11 +30,15 @@ This tutorial covers the basics of the River language and the standard library.

1. **Expressions**

-   Expressions are used to compute values. They can be constant values (for example, `"localhost:9090"`), or they can be more complex (for example, referencing a component's export: `prometheus.exporter.unix.targets`. They can also be a mathematical expression: `(1 + 2) * 3`, or a standard library function call: `env("HOME")`). We will use more expressions as we go along the examples. If you are curious, you can find a list of available standard library functions in the [Standard library documentation][].
+   Expressions are used to compute values.
+   They can be constant values (for example, `"localhost:9090"`), or they can be more complex: a reference to a component's export (`prometheus.exporter.unix.targets`), a mathematical expression (`(1 + 2) * 3`), or a standard library function call (`env("HOME")`).
+   We will use more expressions as we go through the examples.
+   If you are curious, you can find a list of available standard library functions in the [Standard library documentation][].

1. **Blocks**

-   Blocks are used to configure components with groups of attributes or nested blocks. The following example block can be used to configure the logging output of {{< param "PRODUCT_NAME" >}}:
+   Blocks are used to configure components with groups of attributes or nested blocks.
+   The following example block can be used to configure the logging output of {{< param "PRODUCT_NAME" >}}:

   ```river
   logging {
@@ -64,11 +61,6 @@ Comments in River are prefixed with `//` and are single-line only. For example:

## Components

-[Components]: https://grafana.com/docs/agent//flow/concepts/components/
-[Component controller]: https://grafana.com/docs/agent//flow/concepts/component_controller/
-[Components configuration language]: https://grafana.com/docs/agent//flow/concepts/config-language/components/
-[env]: https://grafana.com/docs/agent//flow/reference/stdlib/env/
-
**Recommended reading**

- [Components][]

@@ -97,31 +89,34 @@ prometheus.remote_write "local_prom" {
```

{{< admonition type="note" >}}
-[Component reference]: https://grafana.com/docs/agent//flow/reference/components/
+A list of all available components can be found in the [Component reference][].
+Each component has a link to its documentation, which contains a description of what the component does, its arguments, its exports, and examples.
-A list of all available components can be found in the [Component reference][]. Each component has a link to its documentation, which contains a description of what the component does, its arguments, its exports, and examples.
+[Component reference]: ../../../reference/components/
{{< /admonition >}}

-This pipeline has two components: `local.file` and `prometheus.remote_write`. The `local.file` component is configured with a single argument, `path`, which is set by calling the [env][] standard library function to retrieve the value of the `HOME` environment variable and concatenating it with the string `"file.txt"`. The `local.file` component has a single export, `content`, which contains the contents of the file.
+This pipeline has two components: `local.file` and `prometheus.remote_write`.
+The `local.file` component is configured with a single argument, `path`, which is set by calling the [env][] standard library function to retrieve the value of the `HOME` environment variable and concatenating it with the string `"file.txt"`.
+The `local.file` component has a single export, `content`, which contains the contents of the file.

-The `prometheus.remote_write` component is configured with an `endpoint` block, containing the `url` attribute and a `basic_auth` block. The `url` attribute is set to the URL of the Prometheus remote write endpoint. The `basic_auth` block contains the `username` and `password` attributes, which are set to the string `"admin"` and the `content` export of the `local.file` component, respectively. The `content` export is referenced by using the syntax `local.file.example.content`, where `local.file.example` is the fully qualified name of the component (the component's type + its label) and `content` is the name of the export.
+The `prometheus.remote_write` component is configured with an `endpoint` block, containing the `url` attribute and a `basic_auth` block.
+The `url` attribute is set to the URL of the Prometheus remote write endpoint.
+The `basic_auth` block contains the `username` and `password` attributes, which are set to the string `"admin"` and the `content` export of the `local.file` component, respectively.
+The `content` export is referenced using the syntax `local.file.example.content`, where `local.file.example` is the fully qualified name of the component (the component's type + its label) and `content` is the name of the export.

Flow of example pipeline with local.file and prometheus.remote_write components

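+Condensed into code, the reference described above looks roughly like the following sketch. The remote write URL is an assumption for a local Prometheus; the component names match the example:
+
+```river
+local.file "example" {
+  // env() is a standard library function; "+" concatenates strings.
+  path = env("HOME") + "file.txt"
+}
+
+prometheus.remote_write "local_prom" {
+  endpoint {
+    // Assumed URL of a local Prometheus remote write endpoint.
+    url = "http://localhost:9090/api/v1/write"
+
+    basic_auth {
+      username = "admin"
+      // <component type>.<label>.<export>: the content exported by local.file.example.
+      password = local.file.example.content
+    }
+  }
+}
+```
+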
{{< admonition type="note" >}} -The `local.file` component's label is set to `"example"`, so the fully qualified name of the component is `local.file.example`. The `prometheus.remote_write` component's label is set to `"local_prom"`, so the fully qualified name of the component is `prometheus.remote_write.local_prom`. +The `local.file` component's label is set to `"example"`, so the fully qualified name of the component is `local.file.example`. +The `prometheus.remote_write` component's label is set to `"local_prom"`, so the fully qualified name of the component is `prometheus.remote_write.local_prom`. {{< /admonition >}} This example pipeline still doesn't do anything, so let's add some more components to it. ## Shipping your first metrics -[prometheus.exporter.unix]: https://grafana.com/docs/agent//flow/reference/components/prometheus.exporter.unix/ -[prometheus.scrape]: https://grafana.com/docs/agent//flow/reference/components/prometheus.scrape/ -[prometheus.remote_write]: https://grafana.com/docs/agent//flow/reference/components/prometheus.remote_write/ - **Recommended reading** - Optional: [prometheus.exporter.unix][] @@ -158,7 +153,9 @@ Run {{< param "PRODUCT_NAME" >}} with: /path/to/agent run config.river ``` -Navigate to [http://localhost:3000/explore](http://localhost:3000/explore) in your browser. After ~15-20 seconds, you should be able to see the metrics from the `prometheus.exporter.unix` component! Try querying for `node_memory_Active_bytes` to see the active memory of your host. +Navigate to [http://localhost:3000/explore][] in your browser. +After ~15-20 seconds, you should be able to see the metrics from the `prometheus.exporter.unix` component. +Try querying for `node_memory_Active_bytes` to see the active memory of your host.

Screenshot of node_memory_Active_bytes query in Grafana @@ -175,17 +172,18 @@ The following diagram is an example pipeline: The preceding configuration defines three components: - `prometheus.scrape` - A component that scrapes metrics from components that export targets. -- `prometheus.exporter.unix` - A component that exports metrics from the host, built around [node_exporter](https://github.com/prometheus/node_exporter). +- `prometheus.exporter.unix` - A component that exports metrics from the host, built around [node_exporter][]. - `prometheus.remote_write` - A component that sends metrics to a Prometheus remote-write compatible endpoint. -The `prometheus.scrape` component references the `prometheus.exporter.unix` component's targets export, which is a list of scrape targets. The `prometheus.scrape` component then forwards the scraped metrics to the `prometheus.remote_write` component. +The `prometheus.scrape` component references the `prometheus.exporter.unix` component's targets export, which is a list of scrape targets. +The `prometheus.scrape` component then forwards the scraped metrics to the `prometheus.remote_write` component. -One rule is that components can't form a cycle. This means that a component can't reference itself directly or indirectly. This is to prevent infinite loops from forming in the pipeline. +One rule is that components can't form a cycle. +This means that a component can't reference itself directly or indirectly. +This is to prevent infinite loops from forming in the pipeline. ## Exercise for the reader -[prometheus.exporter.redis]: https://grafana.com/docs/agent//flow/reference/components/prometheus.exporter.redis/ - **Recommended Reading** - Optional: [prometheus.exporter.redis][] @@ -196,7 +194,8 @@ Let's start a container running Redis and configure {{< param "PRODUCT_NAME" >}} docker container run -d --name flow-redis -p 6379:6379 --rm redis ``` -Try modifying the pipeline to scrape metrics from the Redis exporter. You can refer to the [prometheus.exporter.redis][] component documentation for more information on how to configure it. +Try modifying the pipeline to scrape metrics from the Redis exporter. +You can refer to the [prometheus.exporter.redis][] component documentation for more information on how to configure it. To give a visual hint, you want to create a pipeline that looks like this: @@ -205,9 +204,9 @@ To give a visual hint, you want to create a pipeline that looks like this:

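+To get started on the exercise without spoiling the solution below, the Redis exporter block on its own looks something like this hedged sketch. The label is arbitrary, and `redis_addr` should match the container started above:
+
+```river
+prometheus.exporter.redis "local_redis" {
+  // Address of the Redis container started with the docker command above.
+  redis_addr = "localhost:6379"
+}
+```
+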
{{< admonition type="note" >}} -[concat]: https://grafana.com/docs/agent//flow/reference/stdlib/concat/ - You may find the [concat][] standard library function useful. + +[concat]: ../../../reference/stdlib/concat/ {{< /admonition >}} You can run {{< param "PRODUCT_NAME" >}} with the new configuration file by running: @@ -216,7 +215,8 @@ You can run {{< param "PRODUCT_NAME" >}} with the new configuration file by runn /path/to/agent run config.river ``` -Navigate to [http://localhost:3000/explore](http://localhost:3000/explore) in your browser. After the first scrape, you should be able to query for `redis` metrics as well as `node` metrics. +Navigate to [http://localhost:3000/explore][] in your browser. +After the first scrape, you should be able to query for `redis` metrics as well as `node` metrics. To shut down the Redis container, run: @@ -225,10 +225,11 @@ docker container stop flow-redis ``` If you get stuck, you can always view a solution here: + {{< collapse title="Solution" >}} ```river -// Configure your first components, learn about the standard library, and learn how to run Grafana Agent +// Configure your first components, learn about the standard library, and learn how to run Grafana Alloy // prometheus.exporter.redis collects information about Redis and exposes // targets for other components to use @@ -267,8 +268,27 @@ prometheus.remote_write "local_prom" { ## Finishing up and next steps -You might have noticed that running {{< param "PRODUCT_NAME" >}} with the configurations created a directory called `data-agent` in the directory you ran {{< param "PRODUCT_NAME" >}} from. This directory is where components can store data, such as the `prometheus.exporter.unix` component storing its WAL (Write Ahead Log). If you look in the directory, do you notice anything interesting? The directory for each component is the fully qualified name. - -If you'd like to store the data elsewhere, you can specify a different directory by supplying the `--storage.path` flag to {{< param "PRODUCT_ROOT_NAME" >}}'s run command, for example, `/path/to/agent run config.river --storage.path /etc/grafana-agent`. Generally, you can use a persistent directory for this, as some components may use the data stored in this directory to perform their function. - -In the next tutorial, you will look at how to configure {{< param "PRODUCT_NAME" >}} to collect logs from a file and send them to Loki. You will also look at using different components to process metrics and logs before sending them. +You might have noticed that running {{< param "PRODUCT_NAME" >}} with the configurations created a directory called `data-agent` in the directory you ran {{< param "PRODUCT_NAME" >}} from. +This directory is where components can store data, such as the `prometheus.exporter.unix` component storing its WAL (Write Ahead Log). +If you look in the directory, do you notice anything interesting? The directory for each component is the fully qualified name. + +If you'd like to store the data elsewhere, you can specify a different directory by supplying the `--storage.path` flag to {{< param "PRODUCT_ROOT_NAME" >}}'s run command, for example, `/path/to/agent run config.river --storage.path /etc/grafana-agent`. +Generally, you can use a persistent directory for this, as some components may use the data stored in this directory to perform their function. + +In the next tutorial, you will look at how to configure {{< param "PRODUCT_NAME" >}} to collect logs from a file and send them to Loki. 
+You will also look at using different components to process metrics and logs before sending them. + +[Configuration language]: ../../../concepts/config-language/ +[Configuration language concepts]: ../../../concepts/configuration_language/ +[Standard library documentation]: ../../../reference/stdlib/ +[node_exporter]: https://github.com/prometheus/node_exporter +[River]: https://github.com/grafana/river +[prometheus.exporter.redis]: ../../../reference/components/prometheus.exporter.redis/ +[http://localhost:3000/explore]: http://localhost:3000/explore +[prometheus.exporter.unix]: ../../../reference/components/prometheus.exporter.unix/ +[prometheus.scrape]: ../../../reference/components/prometheus.scrape/ +[prometheus.remote_write]: ../../../reference/components/prometheus.remote_write/ +[Components]: ../../../concepts/components/ +[Component controller]: ../../../concepts/component_controller/ +[Components configuration language]: ../../../concepts/config-language/components/ +[env]: ../../../reference/stdlib/env/ diff --git a/docs/sources/tutorials/flow-by-example/get-started.md b/docs/sources/tutorials/flow-by-example/get-started.md new file mode 100644 index 0000000000..93d3fc0285 --- /dev/null +++ b/docs/sources/tutorials/flow-by-example/get-started.md @@ -0,0 +1,97 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tutorials/flow-by-example/get-started/ +description: Getting started with Flow-by-Example Tutorials +title: Get started +weight: 10 +--- + +## Who is this for? + +This set of tutorials contains a collection of examples that build on each other to demonstrate how to configure and use [{{< param "PRODUCT_NAME" >}}][alloy]. +It assumes you have a basic understanding of what {{< param "PRODUCT_ROOT_NAME" >}} is and telemetry collection in general. +It also assumes a base level of familiarity with Prometheus and PromQL, Loki and LogQL, and basic Grafana navigation. +It assumes no knowledge of {{< param "PRODUCT_NAME" >}} or River concepts. + +## What is {{% param "PRODUCT_NAME" %}}? + +{{< param "PRODUCT_NAME" >}} uses a declarative configuration language that allows you to define a pipeline of telemetry collection, processing, and output. +It is built on top of the [River][] configuration language, which is designed to be fast, simple, and debuggable. + +## What do I need to get started? + +You will need a Linux or Unix environment with Docker installed. +The examples are designed to be run on a single host so that you can run them on your laptop or in a VM. +You are encouraged to follow along with the examples using a `config.river` file and experiment with the examples yourself. + +To run the examples, you should have a {{< param "PRODUCT_NAME" >}} binary available. +You can follow the instructions on how to [Install {{< param "PRODUCT_NAME" >}} as a Standalone Binary][install] to get a binary. + +## How should I follow along? + +You can use this Docker-compose file to set up a local Grafana instance alongside Loki and Prometheus pre-configured as datasources. +The examples are designed to be run locally, so you can follow along and experiment with them yourself. 
+
+```yaml
+version: '3'
+services:
+  loki:
+    image: grafana/loki:2.9.0
+    ports:
+      - "3100:3100"
+    command: -config.file=/etc/loki/local-config.yaml
+  prometheus:
+    image: prom/prometheus:v2.47.0
+    command:
+      - --web.enable-remote-write-receiver
+      - --config.file=/etc/prometheus/prometheus.yml
+    ports:
+      - "9090:9090"
+  grafana:
+    environment:
+      - GF_PATHS_PROVISIONING=/etc/grafana/provisioning
+      - GF_AUTH_ANONYMOUS_ENABLED=true
+      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+    entrypoint:
+      - sh
+      - -euc
+      - |
+        mkdir -p /etc/grafana/provisioning/datasources
+        cat <<EOF > /etc/grafana/provisioning/datasources/ds.yaml
+        apiVersion: 1
+        datasources:
+          - name: Loki
+            type: loki
+            access: proxy
+            orgId: 1
+            url: http://loki:3100
+            basicAuth: false
+            isDefault: false
+            version: 1
+            editable: false
+          - name: Prometheus
+            type: prometheus
+            orgId: 1
+            url: http://prometheus:9090
+            basicAuth: false
+            isDefault: true
+            version: 1
+            editable: false
+        EOF
+        /run.sh
+    image: grafana/grafana:latest
+    ports:
+      - "3000:3000"
+```
+
+After running `docker-compose up`, open [http://localhost:3000](http://localhost:3000) in your browser to view the Grafana UI.
+
+The tutorials are designed to be followed in order and generally build on each other.
+Each example explains what it does and how it works.
+
+The Recommended Reading sections in each tutorial provide a list of documentation topics.
+To help you understand the concepts used in the example, read the recommended topics in the order given.
+
+[alloy]: https://grafana.com/docs/alloy/latest/
+[River]: https://github.com/grafana/river
+[install]: ../../../setup/install/binary/#install-grafana-agent-in-flow-mode-as-a-standalone-binary
diff --git a/docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md b/docs/sources/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
similarity index 64%
rename from docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
rename to docs/sources/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
index 02c7c3c138..f5d9c97820 100644
--- a/docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
+++ b/docs/sources/tutorials/flow-by-example/logs-and-relabeling-basics/index.md
@@ -1,10 +1,5 @@
---
-aliases:
-- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
-- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
-- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
-- /docs/grafana-cloud/send-data/agent/flow/tutorials/logs-and-relabeling-basics/
-canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/logs-and-relabeling-basics/
+canonical: https://grafana.com/docs/alloy/latest/tutorials/flow-by-example/logs-and-relabeling-basics/
description: Learn how to relabel metrics and collect logs
title: Logs and relabeling basics
weight: 30
@@ -12,17 +7,17 @@ weight: 30

# Logs and relabeling basics

-This tutorial assumes you have completed the [First components and introducing the standard library](https://grafana.com/docs/agent//flow/tutorials/flow-by-example/first-components-and-stdlib/) tutorial, or are at least familiar with the concepts of components, attributes, and expressions and how to use them.
You will cover some basic metric relabeling, followed by how to send logs to Loki. +This tutorial assumes you have completed the [First components and introducing the standard library][] tutorial, or are at least familiar with the concepts of components, attributes, and expressions and how to use them. +You will cover some basic metric relabeling, followed by how to send logs to Loki. ## Relabel metrics -[prometheus.relabel]: https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel/ - **Recommended reading** - Optional: [prometheus.relabel][] -Before moving on to logs, let's look at how we can use the `prometheus.relabel` component to relabel metrics. The `prometheus.relabel` component allows you to perform Prometheus relabeling on metrics and is similar to the `relabel_configs` section of a Prometheus scrape config. +Before moving on to logs, let's look at how we can use the `prometheus.relabel` component to relabel metrics. +The `prometheus.relabel` component allows you to perform Prometheus relabeling on metrics and is similar to the `relabel_configs` section of a Prometheus scrape configuration. Let's add a `prometheus.relabel` component to a basic pipeline and see how to add labels. @@ -64,35 +59,37 @@ We have now created the following pipeline: This pipeline has a `prometheus.relabel` component that has a single rule. This rule has the `replace` action, which will replace the value of the `os` label with a special value: `constants.os`. This value is a special constant that is replaced with the OS of the host {{< param "PRODUCT_ROOT_NAME" >}} is running on. -You can see the other available constants in the [constants](https://grafana.com/docs/agent//flow/reference/stdlib/constants/) documentation. +You can see the other available constants in the [constants][] documentation. This example has one rule block, but you can have as many as you want. Each rule block is applied in order. -If you run {{< param "PRODUCT_ROOT_NAME" >}} and navigate to [localhost:3000/explore](http://localhost:3000/explore), you can see the `os` label on the metrics. Try querying for `node_context_switches_total` and look at the labels. +If you run {{< param "PRODUCT_ROOT_NAME" >}} and navigate to [localhost:3000/explore][], you can see the `os` label on the metrics. +Try querying for `node_context_switches_total` and look at the labels. -Relabeling uses the same rules as Prometheus. You can always refer to the [prometheus.relabel documentation](https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel/#rule-block) for a full list of available options. +Relabeling uses the same rules as Prometheus. You can always refer to the [prometheus.relabel rule-block][] documentation for a full list of available options. {{< admonition type="note" >}} You can forward multiple components to one `prometheus.relabel` component. This allows you to apply the same relabeling rules to multiple pipelines. {{< /admonition >}} {{< admonition type="warning" >}} -There is an issue commonly faced when relabeling and using labels that start with `__` (double underscore). These labels are considered internal and are dropped before relabeling rules from a `prometheus.relabel` component are applied. If you would like to keep or act on these kinds of labels, use a [discovery.relabel](https://grafana.com/docs/agent//flow/reference/components/discovery.relabel/) component. +There is an issue commonly faced when relabeling and using labels that start with `__` (double underscore). 
+These labels are considered internal and are dropped before relabeling rules from a `prometheus.relabel` component are applied.
+If you would like to keep or act on these kinds of labels, use a [discovery.relabel][] component.
+
+[discovery.relabel]: ../../../reference/components/discovery.relabel/
{{< /admonition >}}

## Send logs to Loki

-[local.file_match]: https://grafana.com/docs/agent//flow/reference/components/local.file_match/
-[loki.source.file]: https://grafana.com/docs/agent//flow/reference/components/loki.source.file/
-[loki.write]: https://grafana.com/docs/agent//flow/reference/components/loki.write/
-
**Recommended reading**

- Optional: [local.file_match][]
- Optional: [loki.source.file][]
- Optional: [loki.write][]

-Now that you're comfortable creating components and chaining them together, let's collect some logs and send them to Loki. We will use the `local.file_match` component to perform file discovery, the `loki.source.file` to collect the logs, and the `loki.write` component to send the logs to Loki.
+Now that you're comfortable creating components and chaining them together, let's collect some logs and send them to Loki.
+We will use the `local.file_match` component to perform file discovery, the `loki.source.file` component to collect the logs, and the `loki.write` component to send the logs to Loki.

Before doing this, we need to ensure we have a log file to scrape. We will use the `echo` command to create a file with some log content.

@@ -124,7 +121,8 @@ The rough flow of this pipeline is:

![Diagram of pipeline that collects logs from /tmp/flow-logs and writes them to a local Loki instance](/media/docs/agent/diagram-flow-by-example-logs-0.svg)

-If you navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`, you can query for `{filename="/tmp/flow-logs/log.log"}` and see the log line we created earlier. Try running the following command to add more logs to the file.
+If you navigate to [localhost:3000/explore][] and switch the Datasource to `Loki`, you can query for `{filename="/tmp/flow-logs/log.log"}` and see the log line we created earlier.
+Try running the following command to add more logs to the file.

```bash
echo "This is another log line!" >> /tmp/flow-logs/log.log
@@ -134,14 +132,11 @@ If you re-execute the query, you can see the new log lines.

![Grafana Explore view of example log lines](/media/docs/agent/screenshot-flow-by-example-log-lines.png)

If you are curious how {{< param "PRODUCT_ROOT_NAME" >}} keeps track of where it is in a log file, you can look at `data-agent/loki.source.file.local_files/positions.yml`.
If you delete this file, {{< param "PRODUCT_ROOT_NAME" >}} starts reading from the beginning of the file again, which is why keeping the {{< param "PRODUCT_ROOT_NAME" >}}'s data directory in a persistent location is desirable.

## Exercise

-[loki.relabel]: https://grafana.com/docs/agent//flow/reference/components/loki.relabel/
-[loki.process]: https://grafana.com/docs/agent//flow/reference/components/loki.process/
-
**Recommended reading**

- [loki.relabel][]

@@ -149,7 +144,8 @@ If you delete this file, {{< param "PRODUCT_ROOT_NAME" >}} starts reading from t

### Add a Label to Logs

-This exercise will have two parts, building on the previous example.
Let's start by adding an `os` label (just like the Prometheus example) to all of the logs we collect. +This exercise will have two parts, building on the previous example. +Let's start by adding an `os` label (just like the Prometheus example) to all of the logs we collect. Modify the following snippet to add the label `os` with the value of the `os` constant. @@ -171,7 +167,10 @@ loki.write "local_loki" { ``` {{< admonition type="note" >}} -You can use the [loki.relabel](https://grafana.com/docs/agent//flow/reference/components/loki.relabel) component to relabel and add labels, just like you can with the [prometheus.relabel](https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel) component. +You can use the [loki.relabel][] component to relabel and add labels, just like you can with the [prometheus.relabel][] component. + +[loki.relabel]: ../../../reference/components/loki.relabel +[prometheus.relabel]: ../../../reference/components/prometheus.relabel {{< /admonition >}} Once you have your completed configuration, run {{< param "PRODUCT_ROOT_NAME" >}} and execute the following: @@ -182,9 +181,11 @@ echo 'level=warn msg="WARN: This is a warn level log!"' >> /tmp/flow-logs/log.lo echo 'level=debug msg="DEBUG: This is a debug level log!"' >> /tmp/flow-logs/log.log ``` -Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`. Try querying for `{filename="/tmp/flow-logs/log.log"}` and see if you can find the new label! +Navigate to [localhost:3000/explore][] and switch the Datasource to `Loki`. +Try querying for `{filename="/tmp/flow-logs/log.log"}` and see if you can find the new label! -Now that we have added new labels, we can also filter on them. Try querying for `{os!=""}`. You should only see the lines you added in the previous step. +Now that we have added new labels, we can also filter on them. Try querying for `{os!=""}`. +You should only see the lines you added in the previous step. {{< collapse title="Solution" >}} @@ -221,10 +222,12 @@ loki.write "local_loki" { ### Extract and add a Label from Logs {{< admonition type="note" >}} -This exercise is more challenging than the previous one. If you are having trouble, skip it and move to the next section, which will cover some of the concepts used here. You can always come back to this exercise later. +This exercise is more challenging than the previous one. +If you are having trouble, skip it and move to the next section, which will cover some of the concepts used here. +You can always come back to this exercise later. {{< /admonition >}} -This exercise will build on the previous one, though it's more involved. +This exercise will build on the previous one, though it's more involved. Let's say we want to extract the `level` from the logs and add it as a label. As a starting point, look at [loki.process][]. This component allows you to perform processing on logs, including extracting values from log contents. @@ -236,7 +239,7 @@ If needed, you can find a solution to the previous exercise at the end of the [p The `stage.logfmt` and `stage.labels` blocks for `loki.process` may be helpful. 
{{< /admonition >}} -Once you have your completed config, run {{< param "PRODUCT_ROOT_NAME" >}} and execute the following: +Once you have your completed configuration, run {{< param "PRODUCT_ROOT_NAME" >}} and execute the following: ```bash echo 'level=info msg="INFO: This is an info level log!"' >> /tmp/flow-logs/log.log @@ -244,7 +247,7 @@ echo 'level=warn msg="WARN: This is a warn level log!"' >> /tmp/flow-logs/log.lo echo 'level=debug msg="DEBUG: This is a debug level log!"' >> /tmp/flow-logs/log.log ``` -Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`. Try querying for `{level!=""}` to see the new labels in action. +Navigate to [localhost:3000/explore][] and switch the Datasource to `Loki`. Try querying for `{level!=""}` to see the new labels in action. ![Grafana Explore view of example log lines, now with the extracted 'level' label](/media/docs/agent/screenshot-flow-by-example-log-line-levels.png) @@ -304,5 +307,16 @@ loki.write "local_loki" { ## Finishing up and next steps -You have learned the concepts of components, attributes, and expressions. You have also seen how to use some standard library components to collect metrics and logs. In the next tutorial, you will learn more about how to use the `loki.process` component to extract values from logs and use them. - +You have learned the concepts of components, attributes, and expressions. You have also seen how to use some standard library components to collect metrics and logs. +In the next tutorial, you will learn more about how to use the `loki.process` component to extract values from logs and use them. + +[First components and introducing the standard library]: ../first-components-and-stdlib/ +[prometheus.relabel]: ../../../reference/components/prometheus.relabel/ +[constants]: ../../../reference/stdlib/constants/ +[localhost:3000/explore]: http://localhost:3000/explore +[prometheus.relabel rule-block]: ../../../reference/components/prometheus.relabel/#rule-block +[local.file_match]: ../../../reference/components/local.file_match/ +[loki.source.file]: ../../../reference/components/loki.source.file/ +[loki.write]: ../../../reference/components/loki.write/ +[loki.relabel]: ../../../reference/components/loki.relabel/ +[loki.process]: ../../../reference/components/loki.process/ diff --git a/docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md b/docs/sources/tutorials/flow-by-example/processing-logs/index.md similarity index 81% rename from docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md rename to docs/sources/tutorials/flow-by-example/processing-logs/index.md index 327b40716c..22e52dc001 100644 --- a/docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md +++ b/docs/sources/tutorials/flow-by-example/processing-logs/index.md @@ -1,10 +1,5 @@ --- -aliases: -- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/processing-logs/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/processing-logs/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/processing-logs/ -- /docs/grafana-cloud/send-data/agent/flow/tutorials/processing-logs/ -canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/processing-logs/ +canonical: https://grafana.com/docs/alloy/latest/tutorials/flow-by-example/processing-logs/ description: Learn how to process logs title: Processing Logs weight: 40 @@ -19,7 +14,7 @@ It covers using `loki.source.api` to 
receive logs over HTTP, processing and filt **Recommended reading** -- Optional: [loki.source.api](https://grafana.com/docs/agent//flow/reference/components/loki.source.api/) +- Optional: [loki.source.api][] The `loki.source.api` component can receive logs over HTTP. It can be useful for receiving logs from other {{< param "PRODUCT_ROOT_NAME" >}}s or collectors, or directly from applications that can send logs over HTTP, and then processing them centrally. @@ -51,9 +46,9 @@ Next, you can configure the `loki.process` and `loki.write` components. **Recommended reading** -- [loki.process#stage.drop](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagedrop-block) -- [loki.process#stage.json](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagejson-block) -- [loki.process#stage.labels](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagelabels-block) +- [loki.process#stage.drop][] +- [loki.process#stage.json][] +- [loki.process#stage.labels][] ```river // Let's send and process more logs! @@ -142,7 +137,8 @@ In subsequent stages, you can use the extracted map to filter logs, add or remov `stage.*` blocks are executed in the order they appear in the component, top down. {{< /admonition >}} -Let's use an example log line to illustrate this, then go stage by stage, showing the contents of the extracted map. Here is our example log line: +Let's use an example log line to illustrate this, then go stage by stage, showing the contents of the extracted map. +Here is our example log line: ```json { @@ -166,10 +162,11 @@ stage.json { } ``` -This stage parses the log line as JSON, extracts two values from it, `log` and `timestamp`, and puts them into the extracted map with keys `log` and `ts`, respectively. +This stage parses the log line as JSON, extracts two values from it, `log` and `timestamp`, and puts them into the extracted map with keys `log` and `ts`, respectively. {{< admonition type="note" >}} -Supplying an empty string is shorthand for using the same key as in the input log line (so `log = ""` is the same as `log = "log"`). The _keys_ of the `expressions` object end up as the keys in the extracted map, and the _values_ are used as keys to look up in the parsed log line. +Supplying an empty string is shorthand for using the same key as in the input log line (so `log = ""` is the same as `log = "log"`). +The _keys_ of the `expressions` object end up as the keys in the extracted map, and the _values_ are used as keys to look up in the parsed log line. {{< /admonition >}} If this were Python, it would be roughly equivalent to: @@ -293,7 +290,7 @@ stage.drop { This stage acts on the `is_secret` value in the extracted map, which is a value that you extracted in the previous stage. This stage drops the log line if the value of `is_secret` is `"true"` and does not modify the extracted map. There are many other ways to filter logs, but this is a simple example. -Refer to the [loki.process#stage.drop](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagedrop-block) documentation for more information. +Refer to the [loki.process#stage.drop][] documentation for more information. ### Stage 5 @@ -320,12 +317,12 @@ stage.output { This stage uses the `log_line` value in the extracted map to set the actual log line that is forwarded to Loki. Rather than sending the entire JSON blob to Loki, you are only sending `original_log_line["log"]["message"]`, along with some labels that you attached. 
-This stage does not modify the extracted map. +This stage doesn't modify the extracted map. ## Putting it all together -Now that you have all of the pieces, let's run the {{< param "PRODUCT_ROOT_NAME" >}} and send some logs to it. -Modify `config.river` with the config from the previous example and start the {{< param "PRODUCT_ROOT_NAME" >}} with: +Now that you have all of the pieces, let's run {{< param "PRODUCT_ROOT_NAME" >}} and send some logs to it. +Modify `config.river` with the config from the previous example and start {{< param "PRODUCT_ROOT_NAME" >}} with: ```bash /path/to/agent run config.river @@ -344,7 +341,7 @@ curl localhost:9999/loki/api/v1/raw -XPOST -H "Content-Type: application/json" - ``` Now that you have sent some logs, let's see how they look in Grafana. -Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`. +Navigate to [localhost:3000/explore][] and switch the Datasource to `Loki`. Try querying for `{source="demo-api"}` and see if you can find the logs you sent. Try playing around with the values of `"level"`, `"message"`, `"timestamp"`, and `"is_secret"` and see how the logs change. @@ -355,12 +352,12 @@ You can also try adding more stages to the `loki.process` component to extract m ## Exercise Since you are already using Docker and Docker exports logs, let's get those logs into Loki. -You can refer to the [discovery.docker](https://grafana.com/docs/agent//flow/reference/components/discovery.docker/) and [loki.source.docker](https://grafana.com/docs/agent//flow/reference/components/loki.source.docker/) documentation for more information. +You can refer to the [discovery.docker][] and [loki.source.docker][] documentation for more information. To ensure proper timestamps and other labels, make sure you use a `loki.process` component to process the logs before sending them to Loki. -Although you have not used it before, let's use a `discovery.relabel` component to attach the container name as a label to the logs. -You can refer to the [discovery.relabel](https://grafana.com/docs/agent//flow/reference/components/discovery.relabel/) documentation for more information. +Although you haven't used it before, let's use a `discovery.relabel` component to attach the container name as a label to the logs. +You can refer to the [discovery.relabel][] documentation for more information. The `discovery.relabel` component is very similar to the `prometheus.relabel` component, but is used to relabel discovered targets rather than metrics. {{< collapse title="Solution" >}} @@ -404,4 +401,13 @@ loki.write "local_loki" { } ``` -{{< /collapse >}} \ No newline at end of file +{{< /collapse >}} + +[loki.source.api]: ../../../reference/components/loki.source.api/ +[loki.process#stage.drop]: ../../../reference/components/loki.process/#stagedrop-block +[loki.process#stage.json]: ../../../reference/components/loki.process/#stagejson-block +[loki.process#stage.labels]: ../../../reference/components/loki.process/#stagelabels-block +[localhost:3000/explore]: http://localhost:3000/explore +[discovery.docker]: ../../../reference/components/discovery.docker/ +[loki.source.docker]: ../../../reference/components/loki.source.docker/ +[discovery.relabel]: ../../../reference/components/discovery.relabel/ diff --git a/docs/variables.mk b/docs/variables.mk index 82dba27a62..43189540d9 100644 --- a/docs/variables.mk +++ b/docs/variables.mk @@ -4,4 +4,4 @@ # This results in the content being served at /docs/agent/latest/. 
# The source of the content is the current repository which is determined by the name of the parent directory of the git root. # This overrides the default behavior of assuming the repository directory is the same as the project name. -PROJECTS := agent::$(notdir $(basename $(shell git rev-parse --show-toplevel))) +PROJECTS := alloy::$(notdir $(basename $(shell git rev-parse --show-toplevel))) diff --git a/internal/tools/docs_generator/docs_updated_test.go b/internal/tools/docs_generator/docs_updated_test.go index e21822d7c0..3c45abe4df 100644 --- a/internal/tools/docs_generator/docs_updated_test.go +++ b/internal/tools/docs_generator/docs_updated_test.go @@ -31,7 +31,7 @@ func TestLinksToTypesSectionsUpdated(t *testing.T) { } func TestCompatibleComponentsPageUpdated(t *testing.T) { - path := filepath.Join(moduleRoot, "docs/sources/flow/reference/compatibility/_index.md") + path := filepath.Join(moduleRoot, "docs/sources/reference/compatibility/_index.md") for _, typ := range metadata.AllTypes { t.Run(typ.Name, func(t *testing.T) { t.Run("exporters", func(t *testing.T) { diff --git a/internal/tools/docs_generator/links_to_types.go b/internal/tools/docs_generator/links_to_types.go index 4c92c0b23e..e6960fe83b 100644 --- a/internal/tools/docs_generator/links_to_types.go +++ b/internal/tools/docs_generator/links_to_types.go @@ -76,7 +76,7 @@ func (l *LinksToTypesGenerator) endMarker() string { } func (l *LinksToTypesGenerator) pathToComponentMarkdown() string { - return fmt.Sprintf("../../../docs/sources/flow/reference/components/%s.md", l.component) + return fmt.Sprintf("../../../docs/sources/reference/components/%s.md", l.component) } func outputComponentsSection(name string, meta metadata.Metadata) string {