Skip to content

Commit

Permalink
wip: [Docs] Revamp the documentation, install files, and examples
Browse files Browse the repository at this point in the history
Signed-off-by: Michael Edgar <[email protected]>
  • Loading branch information
MikeEdgar committed Sep 6, 2024
1 parent c77da11 commit b8b7bc0
Show file tree
Hide file tree
Showing 46 changed files with 196 additions and 1,499 deletions.
98 changes: 44 additions & 54 deletions .github/workflows/playwright-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ on:
env:
TARGET_NAMESPACE: "console-namespace"
CI_CLUSTER: true
CONSOLE_INSTANCE_YAML: "./install/resources/console/console.instance.yaml"
OLM_VERSION: "v0.28.0"
YQ_VERSION: "v4.44.1"

Expand All @@ -23,7 +22,7 @@ jobs:
uses: medyagh/setup-minikube@latest
with:
cpus: 2
memory: 4096m
memory: 8g
addons: registry,ingress,ingress-dns
insecure-registry: 'localhost:5000,10.0.0.0/24'
start-args: '--extra-config=kubeadm.ignore-preflight-errors=SystemVerification --extra-config=apiserver.authorization-mode=RBAC,Node'
Expand Down Expand Up @@ -89,16 +88,9 @@ jobs:
set -x
# Create the CatalogSource with the Console operator bundle
echo '---
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
name: console-operator-catalog
spec:
displayName: StreamsHub
image: localhost:5000/streamshub/console-operator-catalog:${{ env.PROJECT_VERSION }}
publisher: StreamsHub
sourceType: grpc' | kubectl apply -n olm -f -
yq ea '.spec.image = "localhost:5000/streamshub/console-operator-catalog:${{ env.PROJECT_VERSION }}"' \
./install/operator-olm/010-CatalogSource-console-operator-catalog.yaml \
| kubectl apply -n olm -f -
kubectl wait catalogsource/console-operator-catalog -n olm \
--for=jsonpath='{.status.connectionState.lastObservedState}'=READY \
Expand All @@ -117,58 +109,56 @@ jobs:
sourceNamespace: olm' | kubectl apply -n operators -f -
# Install Console Operator
echo '---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: console-operator
spec:
channel: alpha
name: console-operator
source: console-operator-catalog
sourceNamespace: olm
config:
env:
- name: CONSOLE_DEPLOYMENT_DEFAULT_API_IMAGE
value: localhost:5000/streamshub/console-api:${{ env.PROJECT_VERSION }}
- name: CONSOLE_DEPLOYMENT_DEFAULT_UI_IMAGE
value: localhost:5000/streamshub/console-ui:${{ env.PROJECT_VERSION }}
' | kubectl apply -n operators -f -
# Wait for Strimzi Operator
kubectl get deployment --selector=operators.coreos.com/strimzi-kafka-operator.operators -n operators
while [ $(kubectl get deployment --selector=operators.coreos.com/strimzi-kafka-operator.operators -n operators -o name | wc -l) -lt 1 ] ; do
echo "Waiting for Strimzi Deployment to be present"
sleep 10
done
STRIMZI_DEPLOYMENT=$(kubectl get deployment --selector=operators.coreos.com/strimzi-kafka-operator.operators -n operators -o name | tail -1)
echo "Found Strimzi Operator Deployment: ${STRIMZI_DEPLOYMENT}"
kubectl wait ${STRIMZI_DEPLOYMENT} --for=condition=available --timeout=180s -n operators
# Wait for Console Operator
while [ $(kubectl get deployment --selector=operators.coreos.com/console-operator.operators -n operators -o name | wc -l) -lt 1 ] ; do
echo "Waiting for Console Operator Deployment to be present"
sleep 5
done
CONSOLE_DEPLOYMENT=$(kubectl get deployment --selector=operators.coreos.com/strimzi-kafka-operator.operators -n operators -o name | tail -1)
echo "Found Console Operator Deployment: ${CONSOLE_DEPLOYMENT}"
kubectl wait ${CONSOLE_DEPLOYMENT} --for=condition=available --timeout=180s -n operators
yq ea '.spec.sourceNamespace = "olm", .spec.config = {
"env": [{
"name": "CONSOLE_DEPLOYMENT_DEFAULT_API_IMAGE",
"value": "localhost:5000/streamshub/console-api:${{ env.PROJECT_VERSION }}"
}, {
"name": "CONSOLE_DEPLOYMENT_DEFAULT_UI_IMAGE",
"value": "localhost:5000/streamshub/console-ui:${{ env.PROJECT_VERSION }}"
}]
}' ./install/operator-olm/020-Subscription-console-operator.yaml \
| kubectl apply -n operators -f -
wait_operator() {
local OPERATOR=${1}
while [ $(kubectl get deployment --selector=operators.coreos.com/${OPERATOR}.operators -n operators -o name | wc -l) -lt 1 ] ; do
echo "Waiting for Deployment ${OPERATOR} to be present"
sleep 5
done
local OPERATOR_DEPLOYMENT=$(kubectl get deployment --selector=operators.coreos.com/${OPERATOR}.operators -n operators -o name | tail -1)
echo "Found Operator Deployment: ${OPERATOR_DEPLOYMENT}, waiting for condition 'Available'"
kubectl wait ${OPERATOR_DEPLOYMENT} --for=condition=available --timeout=180s -n operators
}
export -f wait_operator
timeout 300s bash -c 'wait_operator "strimzi-kafka-operator"'
timeout 300s bash -c 'wait_operator "console-operator"'
# replace with resources in docs PR
- name: Deploy Kafka Cluster & Console
run: |
set -x
./install/002-deploy-console-kafka.sh $TARGET_NAMESPACE $CLUSTER_DOMAIN
export LISTENER_TYPE=ingress
cat examples/kafka/*.yaml | envsubst | kubectl apply -n ${TARGET_NAMESPACE} -f -
kubectl wait kafka/console-kafka --for=condition=Ready --timeout=300s -n $TARGET_NAMESPACE
kubectl wait kafkauser/console-kafka-user1 --for=condition=Ready --timeout=60s -n $TARGET_NAMESPACE
cat $CONSOLE_INSTANCE_YAML | envsubst && echo
cat $CONSOLE_INSTANCE_YAML | envsubst | kubectl apply -n $TARGET_NAMESPACE -f -
# Display the resource
export KAFKA_NAMESPACE="${TARGET_NAMESPACE}"
cat examples/console/* | envsubst && echo
# Apply the resource
cat examples/console/* | envsubst | kubectl apply -n ${TARGET_NAMESPACE} -f -
kubectl wait console/example --for=condition=Ready --timeout=300s -n $TARGET_NAMESPACE
# Sleep to ensure ingress fully available
sleep 10
- name: Console Smoke Test
Expand Down
133 changes: 96 additions & 37 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,65 +1,123 @@
# Console for Apache Kafka<sup>®</sup> on Kubernetes

# StreamsHub Console for Apache Kafka<sup>®</sup>
[![License](https://img.shields.io/badge/license-Apache--2.0-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=streamshub_console&metric=alert_status)](https://sonarcloud.io/summary/new_code?id=streamshub_console) [![Coverage](https://sonarcloud.io/api/project_badges/measure?project=streamshub_console&metric=coverage)](https://sonarcloud.io/summary/new_code?id=streamshub_console)

This project is a web console designed to facilitate interactions with Apache Kafka<sup>®</sup> instances on Kubernetes, leveraging the [Strimzi](https://strimzi.io) Cluster Operator.
StreamsHub Console is a web application designed to facilitate interactions with Apache Kafka<sup>®</sup> instances, optionally leveraging the [Strimzi](https://strimzi.io) Cluster Operator for Kafka<sup>®</sup> instances running on Kubernetes.
It is composed of three main parts:

- a [REST API](./api) backend developed with Java and [Quarkus](https://quarkus.io/)
- a [user interface (UI)](./ui) built with [Next.js](https://nextjs.org/) and [PatternFly](https://patternfly.org)
- a Kubernetes [operator](./operator) developed with Java and [Quarkus](https://quarkus.io/)

#### Roadmap / Goals
## Roadmap / Goals

The future goals of this project are to provide a user interface to interact with and manage additional data streaming components such as:

- [Apicurio Registry](https://www.apicur.io/registry/) for message serialization and de-serialization + validation
- [Kroxylicious](https://kroxylicious.io/)
- [Apache Flink](https://flink.apache.org/)

Contributions and discussions around use cases for these (and other relevant) components are both welcome and encouraged.

## Running the Application

The console application may either be run in a Kubernetes cluster or locally to try it out.

### Install to Kubernetes

Please refer to the [installation README](./install/README.md) file for detailed information about how to install the latest release of the console in a Kubernetes cluster.

### Run locally

Running the console locally requires the use of a remote or locally-running Kubernetes cluster that hosts the Strimzi Kafka operator
and any Apache Kafka<sup>®</sup> clusters that will be accessed from the console. To get started, you will need to provide a console configuration
file and credentials to connect to the Kubernetes cluster where Strimzi and Kafka are available.
## Deployment
There are several ways to deploy the console - via the operator using the Operator Lifecycle Manager (OLM), via the operator using plain Kubernetes resources, or directly with Kubernetes resources (without the operator).

Note, if you are using [minikube](https://minikube.sigs.k8s.io/) with the `ingress` addon as your Kubernetes cluster, SSL pass-through must be enabled on the nginx controller:
```shell
# Enable TLS passthrough on the ingress deployment
kubectl patch deployment -n ingress-nginx ingress-nginx-controller \
--type='json' \
-p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value":"--enable-ssl-passthrough"}]'
```

### Prerequisites
#### Kafka
The instructions below assume an existing Apache Kafka<sup>®</sup> cluster is available to use from the console. We recommend using [Strimzi](https://strimzi.io) to create and manage your Apache Kafka<sup>®</sup> clusters - plus the console provides additional features and insights for Strimzi Apache Kafka<sup>®</sup> clusters.

If you already have Strimzi installed but would like to create an Apache Kafka<sup>®</sup> cluster for use with the console, example resources are available to get started. This example will create an Apache Kafka<sup>®</sup> cluster in KRaft mode with SCRAM-SHA-512 authentication, a Strimzi `KafkaNodePool` resource to manage the cluster nodes, and a Strimzi `KafkaUser` resource that may be used to connect to the cluster.

Modify the `CLUSTER_DOMAIN` to match the base domain of your Kubernetes cluster (used for ingress configuration), use either `route` (OpenShift) or `ingress` (vanilla Kubernetes) for `LISTENER_TYPE`, and set `NAMESPACE` to be the namespace where the Apache Kafka<sup>®</sup> cluster will be created.
```shell
export CLUSTER_DOMAIN=apps-crc.testing
export NAMESPACE=kafka
export LISTENER_TYPE=route
cat examples/kafka/*.yaml | envsubst | kubectl apply -n ${NAMESPACE} -f -
```
##### Kafka Authorization
In order to allow the necessary access for the console to function, a minimum level of authorization must be configured for the principal in use for each Kafka cluster connection. While the definition of the permissions may vary depending on the authorization framework in use (e.g. ACLs, Keycloak Authorization, OPA, or custom) the minimum required in terms of ACL types are:
1. `DESCRIBE`, `DESCRIBE_CONFIGS` for the `CLUSTER` resource
2. `READ`, `DESCRIBE`, `DESCRIBE_CONFIGS` for all `TOPIC` resources
3. `READ`, `DESCRIBE` for all `GROUP` resources

#### Prometheus
Prometheus is an optional dependency of the console if cluster metrics are to be displayed. The operator currently installs a private Prometheus instance for each `Console` instance. However, when installing a single console deployment, Prometheus must be either installed separately or provided via a URL reference. This will be addressed below in the section dealing with creating a console via a `Deployment`.

### Deploy the operator with OLM
The preferred way to deploy the console is using the Operator Lifecycle Manager, or OLM. The sample install files in `install/operator-olm` will install the operator with cluster-wide scope. This means that `Console` instances may be created in any namespace. If you wish to limit the scope of the operator, the `OperatorGroup` resource may be modified to specify only the namespace that should be watched by the operator.

This example will create the operator's OLM resources in the `default` namespace. Modify the `NAMESPACE` variable according to your needs.
```shell
export NAMESPACE=default
cat install/operator-olm/*.yaml | envsubst | kubectl apply -n ${NAMESPACE} -f -
```
#### Console Custom Resource Example
Once the operator is ready, you may then create a `Console` resource in the namespace where the console should be deployed. This example `Console` is based on the example Apache Kafka<sup>®</sup> cluster deployed above in the [prerequisites section](#prerequisites). Also see [examples/console/010-Console-example.yaml](examples/console/010-Console-example.yaml).
```yaml
apiVersion: console.streamshub.github.com/v1alpha1
kind: Console
metadata:
name: example
spec:
hostname: example-console.apps-crc.testing # Hostname where the console will be accessed via HTTPS
kafkaClusters:
- name: console-kafka # Name of the `Kafka` CR representing the cluster
namespace: kafka # Namespace of the `Kafka` CR representing the cluster
listener: secure # Listener on the `Kafka` CR to connect from the console
properties:
values: [] # Array of name/value for properties to be used for connections
# made to this cluster
valuesFrom: [] # Array of references to ConfigMaps or Secrets with properties
# to be used for connections made to this cluster
credentials:
kafkaUser:
name: console-kafka-user1 # Name of the `KafkaUser` resource used to connect to Kafka
# This is optional if properties are used to configure the user
```

### Deploy the operator directly
Deploying the operator without the use of OLM requires applying the component Kubernetes resources for the operator directly. These resources are bundled and attached to each StreamsHub Console release. The latest release can be found [here](https://github.com/streamshub/console/releases/latest). The resource file is named `console-operator-x.y.z.yaml` where `x.y.z` is the released version.

This example will create the operator's resources in the `default` namespace. Modify the `NAMESPACE` variable according to your needs and set `VERSION` to the [latest release](https://github.com/streamshub/console/releases/latest).
```
export NAMESPACE=default
export VERSION=0.3.3
curl -sL https://github.com/streamshub/console/releases/download/${VERSION}/console-operator-${VERSION}.yaml \
| envsubst \
| kubectl apply -n ${NAMESPACE} -f -
```
Note: if you are not using the Prometheus operator you may see an error about a missing `ServiceMonitor` custom resource type. This error may be ignored.

With the operator resources created, you may create a `Console` resource like the one shown in [Console Custom Resource Example](#console-custom-resource-example).

## Running locally

Running the console locally requires configuration of any Apache Kafka<sup>®</sup> clusters that will be accessed from the console and (optionally) the use of a Kubernetes cluster that hosts the Strimzi Kafka operator. To get started, you will need to provide a console configuration file and (optionally) credentials to connect to the Kubernetes cluster where Strimzi is operating.

1. Using the [console-config-example.yaml](./console-config-example.yaml) file as an example, create your own configuration
in a file `console-config.yaml` in the repository root. The `compose.yaml` file expects this location to be used, and
any difference in name or location requires an adjustment to the compose file.

2. Install the prerequisite software into the Kubernetes cluster. This step assumes none have yet been installed.
```shell
./install/000-install-dependency-operators.sh <your namespace>
./install/001-deploy-prometheus.sh <your namespace> <your cluster base domain>
./install/002-deploy-console-kafka.sh <your namespace> <your cluster base domain>
```
Note that the Prometheus instance will be available at `http://console-prometheus.<your cluster base domain>` when this step
completes.

3. Provide the Prometheus endpoint, the API server endpoint, and the service account token that you would like to use to connect to the Kubernetes cluster. These may be placed in a `compose.env` file that will be detected when starting the console.
2. Install the prerequisite software into the Kubernetes cluster.
* Install the [Strimzi operator](https://strimzi.io/docs/operators/latest/deploying#con-strimzi-installation-methods_str)
* Install the [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md) and create a `Prometheus` instance (_optional_, only if you want to see metrics in the console)
* Create an Apache Kafka<sup>®</sup> cluster. See the [example above](#kafka). This step is only required if you do not already have an existing cluster you would like to use with the console.
3. (_Skip this step if you are not using Kubernetes and Prometheus_) Provide the Prometheus endpoint, the API server endpoint, and the service account token that you would like to use to connect to the Kubernetes cluster. These may be placed in a `compose.env` file that will be detected when starting the console.
```
CONSOLE_API_SERVICE_ACCOUNT_TOKEN=<TOKEN>
CONSOLE_API_KUBERNETES_API_SERVER_URL=https://my-kubernetes-api.example.com:6443
CONSOLE_METRICS_PROMETHEUS_URL=http://console-prometheus.<your cluster base domain>
```
The service account token may be obtained using the `kubectl create token` command. For example, to create a service account
named "console-server" (from [console-server.serviceaccount.yaml](./install/resources/console/console-server.serviceaccount.yaml)
with the correct permissions and a token that expires in 1 year ([yq](https://github.com/mikefarah/yq/releases) required):
The service account token may be obtained using the `kubectl create token` command. For example, to create a service account named "console-server" with the correct permissions and a token that expires in 1 year ([yq](https://github.com/mikefarah/yq/releases) required):
```shell
export NAMESPACE=<service account namespace>
kubectl apply -n ${NAMESPACE} -f ./install/resources/console/console-server.clusterrole.yaml
kubectl apply -n ${NAMESPACE} -f ./install/resources/console/console-server.serviceaccount.yaml
yq '.subjects[0].namespace = strenv(NAMESPACE)' ./install/resources/console/console-server.clusterrolebinding.yaml | kubectl apply -n ${NAMESPACE} -f -
kubectl apply -n ${NAMESPACE} -f ./install/console/010-ServiceAccount-console-server.yaml
kubectl apply -n ${NAMESPACE} -f ./install/console/020-ClusterRole-console-server.yaml
cat ./install/console/030-ClusterRoleBinding-console-server.yaml | envsubst | kubectl apply -n ${NAMESPACE} -f -
kubectl create token console-server -n ${NAMESPACE} --duration=$((365*24))h
```

Expand Down Expand Up @@ -96,3 +154,4 @@ Once approved and the pull request is merged, the release action will execute. T
## License

This project is licensed under the Apache License 2.0 - see the LICENSE file for details.

Loading

0 comments on commit b8b7bc0

Please sign in to comment.